diff --git a/.env.enc b/.env.enc new file mode 100644 index 00000000..4d3fbae6 --- /dev/null +++ b/.env.enc @@ -0,0 +1,15 @@ +LINEAR_API_KEY=ENC[AES256_GCM,data:vRXUs2ShBKmEyxD9yH09OwgtfJ7puOjSHPStCBnHDRN5ITAdkewg5p2JigOF4z9h,iv:2/NtDU6z33IU9T2N3IPlEjQ81OqsWn33cn8Bv9HNRpM=,tag:0De7RGq8d5yYBLAGpHvxgA==,type:str] +#ENC[AES256_GCM,data:FwVCNi0RYYsvqQ93h603+VqUTFSH4w==,iv:Ll9h37dJfAseA6oWWa2M84BSr6BNF4vdUvMsNLHgcyo=,tag:5mrtQtkKFk8myXaRgTGKGA==,type:comment] +SLACK_BOT_TOKEN=ENC[AES256_GCM,data:npHvNX/41Tzp9GzWKZJ/JMSotiZcMw8CpBx9Eoje4LKjWKDsyCV4cTAytdY8miyl7b9eRll5XcoUOhw=,iv:8zHB0uz1Xa8bmk6R0GlfRfA7RGoR2Bce8DrMnh89AFE=,tag:NAMlJrgJrc8cgRKIsU8cMw==,type:str] +SLACK_APP_TOKEN=ENC[AES256_GCM,data:+xX0KtSwC8Y/iTD1W5HCC8g0o/SWHNnRzJViEtrFZhrQUgt6EdkNo12FBtfIhZWpYyDJzkysdCX62DuPbLd6GvtYjVDIpulUYkF/71ehKM7TgdQYTlMSkLrlzSGxx734PuI=,iv:8d/JCXBm1TJLj83vVyZVf0Ejzlyb2hDRVnhLcKvp66U=,tag:Y7JaH+6joe8LW3dPaFtneA==,type:str] +#ENC[AES256_GCM,data:wL5ZCt4ipsixSaDkQK9nt62XdhvT+RIUlSulqHT/MGJx6FTtgfSjBLXSN88XXOWQygt6Pm18oA==,iv:SK+mmfjiRt5cq/ZCL8WScpjHLKU0a7yKIpi1nx6D5B8=,tag:0WAl/zsnxK726gbxei8N9Q==,type:comment] +#ENC[AES256_GCM,data:Lwg+4NujXm5WJDzgLyB8SCg/p2x8zpewQyWk7EewcbRzRcc4JNxsoqHKRyWgx7u4gOgi5sD3iphdw4ZHVGgtgoLoQFi+6MdazOpKsdzoapE8a5Cww+oWeJ8kJ1Y=,iv:2Nk/IBWOPDYXiTYXVBHyNvgAdxeU8DwSJcUx75CYnBQ=,tag:wgNcbkmUNxRYAb4Y4shsmA==,type:comment] +CHANNEL_PROJECT_MAP=ENC[AES256_GCM,data:nlIWKDmFmZAgwTJJnpOnMiondw4rfoILu1QOTLWtXSvxngL7fEQlvTdxM8FP9SnJiS8eorDbiUm+MWeknu3EVfU0E3kLJvY1AooRB2c3pYHKvLm4KuDK5XOFHhS5TSrZsm0EQWxJ54vSMvKRlA5qD4WSv/OT6dFlgkRm1QkJpCSoDbJMm4Tfmc4NFiueYQ9ftkHkgSZtK2RlGWliz2/rYKX2QlKCb3+h8rJchGbeYk11ob5MThztO66LKJ46KppRk3gG/5asqKof/tRGKepcrMOPQguNRI7OoTYDpstNkOWlbmVuwsBca5aYS6xuTdW2J4LiQNKnFMT6hoxSsZxrHIZ0pQ+kUQgAhfpdKcGEAKG1wOdriLWuz0N+1+Q=,iv:VcnNVyQTocHSPhw/ZG/gzei7xsQWEt5yP6tSEosxHTA=,tag:hvn88f983LBwXwOKG6H0lA==,type:str] 
+#ENC[AES256_GCM,data:OvZDECuGRW87fnNR/BQUlC25xe0ol8pBcm3hSIcN54NaK2UVg+v2zyxwseTUDcwxRe1U,iv:nylZjuu3I0LitRMpDpzi7tTwFkvSSWKUoN8P1xqWHRk=,tag:sopVFpFrIaz9S8HOMGFliQ==,type:comment] +CLAUDE_MODEL=ENC[AES256_GCM,data:hbkZYw==,iv:9yzUxI/bXKLlNTuVTLICdobV8+QZDuntByhetG3QmBw=,tag:ynabmmqv//0tSajcjhcvFw==,type:str] +sops_age__list_0__map_enc=-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSAxU3lVRmx5QnBhN29PRGRH\nTXhRd2RrYS91Y2JuelJqN2pXd1NTck9LYWpzCnBlWWlhNGhiTXBGREJkY2FqNUF6\nbDIvZ24xTXhZUEtPbXNNTGQ3RkFHZW8KLS0tIER0azVIalhOOUlqUjhpNzlqOHVE\nOUdiemtsWC90NzlRZUlXblpYSHJ1VEEKYFK5o+QjcTZh8awX7zM1gtiFGOxhENJa\noyrkrO0sbb+wftQ5VnS540JgGfjZ41woig2NR6BpRc0xfDyVm54aKw==\n-----END AGE ENCRYPTED FILE-----\n +sops_age__list_0__map_recipient=age17ud00xq42ckzfpmhxtwz4zy0vc50lsa3jfvulrt5zhe2pd94p9kqy775uc +sops_lastmodified=2026-03-23T20:36:48Z +sops_mac=ENC[AES256_GCM,data:vloTsrC6A7AYF/W+7WPRrejfK2oeq0G9zPmAGeAcvndv9eW59SVccL8gwiX7L2cp/1T/X/KLIsfEtO4j7aXOtIn2OFRLCgoXLgSbzVR1zoUQIY4sub8fcgQJBWYfwc4jdLbuaCcr4oKhdioneURNVE6a8L0zH79l7bGEHPPatFk=,iv:f75NhCH/eo9Eag+9rdTthd9KdJr5B0v+HHhf3dXpm74=,tag:b1KKr/7CJjz/MQLgXkI4KA==,type:str] +sops_unencrypted_suffix=_unencrypted +sops_version=3.12.2 diff --git a/.env.example b/.env.example new file mode 100644 index 00000000..d4519dae --- /dev/null +++ b/.env.example @@ -0,0 +1,11 @@ +# Slack bot credentials +SLACK_BOT_TOKEN=xoxb-your-bot-token +SLACK_APP_TOKEN=xapp-your-app-level-token + +# Channel ID → project directory mapping (JSON object) +# Example: {"C0123456789":"/home/user/projects/my-app","C9876543210":"/home/user/projects/other-app"} +CHANNEL_PROJECT_MAP={} + +# Claude Code model identifier (optional, defaults to "sonnet") +# Valid values: sonnet, opus, haiku +CLAUDE_MODEL=sonnet diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index a7ae229c..2a9e0d0b 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -5,6 +5,7 @@ on: branches: - main pull_request: + merge_group: jobs: 
test: diff --git a/.github/workflows/post-merge-gate.yml b/.github/workflows/post-merge-gate.yml new file mode 100644 index 00000000..537cdfb7 --- /dev/null +++ b/.github/workflows/post-merge-gate.yml @@ -0,0 +1,202 @@ +name: Post-Merge Gate + +on: + push: + branches: + - main + +permissions: + contents: write + +jobs: + gate: + runs-on: ubuntu-latest + + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Setup pnpm + uses: pnpm/action-setup@v4 + with: + version: 10.30.2 + + - name: Setup Node.js + uses: actions/setup-node@v4 + with: + node-version: 22 + cache: pnpm + + - name: Install dependencies + run: pnpm install --frozen-lockfile + + - name: Lint + id: lint + run: pnpm lint + + - name: Typecheck + id: typecheck + if: always() && steps.lint.outcome != 'cancelled' + run: pnpm typecheck + + - name: Test + id: test + if: always() && steps.typecheck.outcome != 'cancelled' + run: pnpm test + + - name: Build + id: build + if: always() && steps.test.outcome != 'cancelled' + run: pnpm build + + - name: Bump calver version + if: success() + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + run: | + git config user.name "github-actions[bot]" + git config user.email "github-actions[bot]@users.noreply.github.com" + + # Retry loop handles race when concurrent merges push at the same time + for ATTEMPT in 1 2 3; do + git pull --rebase origin main + + # Read current version after pulling latest + CURRENT_VERSION=$(node -p "require('./package.json').version") + TODAY=$(date -u +"%Y.%m.%d") + + # Extract date prefix and sequence from current version + CURRENT_PREFIX=$(echo "$CURRENT_VERSION" | cut -d. -f1-3) + CURRENT_SEQ=$(echo "$CURRENT_VERSION" | cut -d. 
-f4) + + # Determine next sequence + if [ "$CURRENT_PREFIX" = "$TODAY" ]; then + NEXT_SEQ=$((CURRENT_SEQ + 1)) + else + NEXT_SEQ=1 + fi + + NEXT_VERSION="${TODAY}.${NEXT_SEQ}" + + # Update package.json version field directly (avoids npm lockfile side-effects) + node -e " + const fs = require('fs'); + const pkg = JSON.parse(fs.readFileSync('package.json', 'utf8')); + pkg.version = '${NEXT_VERSION}'; + fs.writeFileSync('package.json', JSON.stringify(pkg, null, 2) + '\n'); + " + + git add package.json + git commit -m "chore: bump calver to ${NEXT_VERSION} [skip ci]" + + if git push; then + echo "::notice::Bumped version to ${NEXT_VERSION}" + exit 0 + fi + + echo "::warning::Push failed (attempt ${ATTEMPT}/3), retrying..." + git reset --soft HEAD~1 + done + + echo "::error::Failed to push calver bump after 3 attempts" + exit 1 + + - name: Create Linear issue on failure + if: failure() + env: + LINEAR_API_KEY: ${{ secrets.LINEAR_API_KEY }} + run: | + COMMIT_SHA="${{ github.sha }}" + SHORT_SHA="${COMMIT_SHA:0:7}" + RUN_URL="${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}" + + # Extract PR number from merge commit message (GitHub format: "Merge pull request #N ..." or squash "... 
(#N)") + PR_NUMBER=$(git log -1 --pretty=%s | grep -oP '#\K[0-9]+' | head -1) + PR_INFO="" + if [ -n "$PR_NUMBER" ]; then + PR_INFO="**PR:** #${PR_NUMBER}" + fi + + # Determine which steps failed + FAILED_STEPS="" + if [ "${{ steps.lint.outcome }}" = "failure" ]; then + FAILED_STEPS="${FAILED_STEPS}\n- Lint" + fi + if [ "${{ steps.typecheck.outcome }}" = "failure" ]; then + FAILED_STEPS="${FAILED_STEPS}\n- Typecheck" + fi + if [ "${{ steps.test.outcome }}" = "failure" ]; then + FAILED_STEPS="${FAILED_STEPS}\n- Test" + fi + if [ "${{ steps.build.outcome }}" = "failure" ]; then + FAILED_STEPS="${FAILED_STEPS}\n- Build" + fi + + TITLE="pipeline-halt: post-merge gate failure on ${SHORT_SHA}" + + BODY="## Post-Merge Gate Failure\n\n**Commit:** ${COMMIT_SHA}\n${PR_INFO}\n**Run:** ${RUN_URL}\n\n**Failed steps:**${FAILED_STEPS}" + + # Look up SYMPH team ID and pipeline-halt label via Linear API + TEAM_QUERY='{ "query": "{ teams(filter: { key: { eq: \"SYMPH\" } }) { nodes { id } } }" }' + TEAM_RESPONSE=$(curl -s -X POST https://api.linear.app/graphql \ + -H "Content-Type: application/json" \ + -H "Authorization: ${LINEAR_API_KEY}" \ + -d "$TEAM_QUERY") + TEAM_ID=$(echo "$TEAM_RESPONSE" | jq -r '.data.teams.nodes[0].id') + + if [ -z "$TEAM_ID" ] || [ "$TEAM_ID" = "null" ]; then + echo "::error::Failed to look up SYMPH team ID from Linear API" + exit 1 + fi + + # Find or create the pipeline-halt label + LABEL_QUERY='{ "query": "{ issueLabels(filter: { name: { eq: \"pipeline-halt\" } }) { nodes { id } } }" }' + LABEL_RESPONSE=$(curl -s -X POST https://api.linear.app/graphql \ + -H "Content-Type: application/json" \ + -H "Authorization: ${LINEAR_API_KEY}" \ + -d "$LABEL_QUERY") + LABEL_ID=$(echo "$LABEL_RESPONSE" | jq -r '.data.issueLabels.nodes[0].id') + + LABEL_IDS_PARAM="" + if [ -n "$LABEL_ID" ] && [ "$LABEL_ID" != "null" ]; then + LABEL_IDS_PARAM=", labelIds: [\"${LABEL_ID}\"]" + else + # Create the label on the SYMPH team + CREATE_LABEL_QUERY=$(cat < config resolver -> 
orchestrator polls Linear -> creates workspace (after-create hook clones repo) -> renders prompt template with issue context -> dispatches agent run -> agent works in isolated workspace -> orchestrator manages state transitions back to Linear. + +**Key architectural decisions**: +- In-memory state only (no BullMQ/Redis) -- designed for 2-3 concurrent workers +- `strictVariables: true` on LiquidJS -- all template variables must be in render context +- Orchestrator is deliberately "dumb" -- review intelligence, failure classification, and feedback injection live in the agent layer (prompts + skills), not here +- `permissionMode: "bypassPermissions"` required for headless agent runs + +## Build & Run + +```bash +# Install dependencies +pnpm install + +# Build (compiles TypeScript to dist/) +pnpm build # or: npm run build + +# Run the pipeline for a specific product +./run-pipeline.sh +# Products: symphony, jony-agent, hs-data, hs-ui, hs-mobile, stickerlabs, household + +# Run directly (after building) +node dist/src/cli/main.js --acknowledge-high-trust-preview + +# Type check only +pnpm typecheck # or: npx tsc --noEmit + +# Lint +pnpm lint # Biome check + +# Auto-format +pnpm format # Biome format +``` + +No dev server -- this is a CLI tool. The D40 port table does not apply. + +## Conventions + +- **Runtime**: Node.js >= 22, pnpm >= 10, TypeScript strict mode, ES2023 target +- **Module system**: ESM (`"type": "module"`), NodeNext module resolution +- **Imports**: `import type { ... 
}` for type-only imports (`verbatimModuleSyntax: true`), `.js` extensions required for NodeNext +- **Formatting**: Biome -- spaces (not tabs), double quotes, semicolons always, trailing commas +- **Naming**: kebab-case for file names, PascalCase for types/interfaces, camelCase for functions/variables +- **Validation**: Zod for config/input validation at I/O boundaries +- **Templates**: LiquidJS for prompt rendering -- always pass all required variables (strictVariables is on) +- **Strict TS options**: `noUncheckedIndexedAccess`, `exactOptionalPropertyTypes`, `useUnknownInCatchVariables`, `noImplicitOverride` + +## Testing + +- **Framework**: Vitest +- **Run tests**: `pnpm test` (runs all 347 tests once via `node scripts/test.mjs`) +- **Watch mode**: `pnpm test:watch` +- **Location**: `tests/` directory, mirrors `src/` structure (e.g., `tests/orchestrator/core.test.ts` covers `src/orchestrator/core.ts`) +- **Fixtures**: `tests/fixtures/` for shared test data +- **Coverage**: All new code must have tests. Critical paths (orchestrator, config resolution, tracker) have thorough coverage. +- **Naming**: Test files named after the module they cover; individual test cases named after observable behavior. + +## Pipeline Notes + +### Critical: dist/ staleness + +**The pipeline runs from compiled `dist/`, NOT source.** If you modify source files but forget to rebuild, your changes will not take effect. `run-pipeline.sh` includes a staleness check that compares `src/` timestamps against `dist/src/cli/main.js`. Use `--auto-build` to rebuild automatically, or `--skip-build-check` to bypass. 
+ +### Auto-generated files (never edit directly) +- `dist/` -- compiled output, regenerated by `pnpm build` +- `pipeline-config/workspaces/` -- runtime workspace directories (UUID-named) +- `pnpm-lock.yaml` -- dependency lock file (regenerated by `pnpm install`) + +### Required environment variables +- `LINEAR_API_KEY` -- Linear API token for tracker integration (loaded from `.env` by `run-pipeline.sh`) +- `REPO_URL` -- target repo URL for workspace cloning (set per-product in `run-pipeline.sh`, or override via env) + +### Fragile areas +- **`active_states` in WORKFLOW configs** must include ALL states set during execution (In Progress, In Review, Blocked, Resume). This bug has been hit 3 times -- missing a state causes silent failures. +- **LiquidJS `strictVariables: true`** -- any variable referenced in a prompt template that is not passed in the render context will throw. Always verify template variables match the context passed by `prompt-builder.ts`. +- **`scheduleRetry`** is used for both failures AND continuations -- the max retry limit must only count actual failures, not continuation retries. +- **Hook scripts** run with `cwd: workspacePath`, NOT the WORKFLOW.md location. Relative paths in hooks resolve against the workspace. +- **`issue.state`** is a string in LiquidJS context (via `toTemplateIssue`), not an object. Template conditionals must compare against string values. +- **`stall_timeout_ms`** default (5 min) is too short for Claude Code agents. Set to 900000 (15 min) in WORKFLOW configs. +- **Linear project slug** is the `slugId` UUID, not the team key. 
+ +### Verify commands (must pass before any PR) +```bash +pnpm test # All 347 tests pass +pnpm build # Compiles without errors +pnpm typecheck # No type errors +pnpm lint # Biome passes +``` + +### Scope boundaries +- Do NOT add BullMQ, Redis, or external queue infrastructure -- in-memory state is a deliberate design choice at current scale +- Do NOT move review intelligence or failure classification into the orchestrator -- these belong in the agent layer (prompts + skills) +- Do NOT modify hook scripts without testing against actual workspace creation flow +- Do NOT commit secrets to `.env` in public contexts (current repo is private; audit before making public) +- Every non-Claude-Code component should be designed for removal when Anthropic ships equivalent features diff --git a/INVESTIGATION-BRIEF.md b/INVESTIGATION-BRIEF.md new file mode 100644 index 00000000..546eefa0 --- /dev/null +++ b/INVESTIGATION-BRIEF.md @@ -0,0 +1,118 @@ +# Investigation Brief +## Issue: SYMPH-57 — Consolidate spec-gen to produce 1-2 sub-issues for STANDARD specs + +## Objective +Update three spec-gen skill reference files to change the STANDARD tier task-count target from "2-6" to "1-2". Pipeline telemetry shows ~20 min fixed overhead per ticket regardless of complexity, so fewer larger tickets dramatically reduce total wall-clock time. No logic changes — only documentation/guidance text updates. + +## Relevant Files (ranked by importance) + +1. `~/.claude/skills/spec-gen/references/complexity-router.md` — Primary file. Contains the STANDARD tier definition, the decision tree, Rule 6 (task-count estimate guidance), and the Quick Reference Table. Four distinct locations need updating. +2. `~/.claude/skills/spec-gen/references/model-tendencies.md` — Contains "Task granularity mismatch" bullet and Spec Quality Checklist. Two locations need updating. +3. `~/.claude/skills/spec-gen/SKILL.md` — Step 4 Self-Review checklist references `2-6 for STANDARD`. One location needs updating. 
+
+## Key Code Patterns
+
+- All files are plain Markdown — no code, no tests, no build step.
+- Changes are simple string substitutions: `2-6` → `1-2` in STANDARD-context sentences.
+- Be precise: the string `2-6` also appears in non-STANDARD contexts (e.g., "Touches 2-6 files" in the STANDARD Signals list) — do NOT change those.
+
+## Architecture Context
+
+These files are read by the `spec-gen` skill (a Claude slash command at `~/.claude/skills/spec-gen/SKILL.md`) during spec generation. They are guidance documents, not executable code. No tests, no imports, no CI pipeline applies to them directly.
+
+## Exact Changes Required
+
+### File 1: `~/.claude/skills/spec-gen/references/complexity-router.md`
+
+**Change 1 — Decision tree (line 13):**
+```
+Before: │ ├── 1 capability, ≤6 tasks, clear scope → STANDARD
+After: │ ├── 1 capability, ≤2 tasks, clear scope → STANDARD
+```
+
+**Change 2 — STANDARD definition (line 60):**
+```
+Before: **Definition**: A single capability with clear scope that decomposes into 2-6 tasks.
+After: **Definition**: A single capability with clear scope that decomposes into 1-2 tasks.
+```
+
+**Change 3 — Quick Reference Table (line 167):**
+```
+Before: | Tasks | 0-1 | 2-6 | 7+ |
+After: | Tasks | 0-1 | 1-2 | 7+ |
+```
+
+**Change 4 — Rule 6, Signal Detection (line 142):**
+```
+Before: If you estimate 2-6 tasks → STANDARD. If you estimate 7+ tasks → COMPLEX. If you estimate 1 task → TRIVIAL (unless it's a behavioral change with verification needs).
+After: If you estimate 1-2 tasks → STANDARD (a 1-task estimate falls to TRIVIAL instead, unless it's a behavioral change with verification needs). If you estimate 3+ tasks → COMPLEX.
+```
+Note: Rule 6 also updates the COMPLEX boundary from "7+" to "3+" to eliminate the 3-6 gap — this is consistent with the new 1-2 STANDARD definition and the "3+ capabilities → COMPLEX" spirit of the spec. If you adopt the 3+ boundary here, also update Change 3's Quick Reference Table COMPLEX column from `7+` to `3+` so the two changes stay consistent with each other.
If the issue intent is strictly "only change STANDARD, don't touch COMPLEX threshold," keep `7+` and add a TODO noting the gap. + +### File 2: `~/.claude/skills/spec-gen/references/model-tendencies.md` + +**Change 1 — Task granularity mismatch bullet (line 25):** +``` +Before: Target 2-6 tasks for STANDARD features. +After: Target 1-2 tasks for STANDARD features. +``` + +**Change 2 — Spec Quality Checklist (line 76):** +``` +Before: - [ ] Task count is appropriate for complexity tier (2-6 for STANDARD) +After: - [ ] Task count is appropriate for complexity tier (1-2 for STANDARD) +``` + +### File 3: `~/.claude/skills/spec-gen/SKILL.md` + +**Change 1 — Step 4 Self-Review checklist (line 288):** +``` +Before: - [ ] Task count matches complexity tier (2-6 for STANDARD, 7+ for COMPLEX) +After: - [ ] Task count matches complexity tier (1-2 for STANDARD, 7+ for COMPLEX) +``` + +## Test Strategy + +No automated tests. Validate with grep: +```bash +# Confirm no STANDARD-context "2-6" references remain: +grep -n "2-6" ~/.claude/skills/spec-gen/references/complexity-router.md +grep -n "2-6" ~/.claude/skills/spec-gen/references/model-tendencies.md +grep -n "2-6" ~/.claude/skills/spec-gen/SKILL.md + +# Confirm new "1-2" values are present in each file: +grep -n "1-2" ~/.claude/skills/spec-gen/references/complexity-router.md +grep -n "1-2" ~/.claude/skills/spec-gen/references/model-tendencies.md +grep -n "1-2" ~/.claude/skills/spec-gen/SKILL.md +``` + +Note: `complexity-router.md` has `2-6` in the STANDARD Signals list ("Touches 2-6 files") — this is a *file count* signal, NOT a task count. Do NOT change it. + +## Gotchas & Constraints + +- **Only change task-count references to "2-6"**, not file-count references. "Touches 2-6 files" in the STANDARD Signals section stays unchanged. +- **STANDARD examples table** (complexity-router.md lines 78-86) shows 2-4 estimated tasks per example. 
These are now inconsistent with the new 1-2 target but the issue spec does not mention updating them. Leave them as-is; optionally add a `` HTML comment. +- **Do not change COMPLEX threshold** in the Quick Reference Table unless the spec explicitly says to. The issue is ambiguous on Rule 6 — see Change 4 notes above. +- These files live in `~/.claude/skills/`, NOT in the symphony-ts repo. No PR is needed. Changes are applied directly. +- No build step, no tests, no migration. + +## Key Code Excerpts + +**complexity-router.md lines 12-14 (decision tree):** +``` +│ ├── How many capabilities does it touch? +│ │ ├── 1 capability, ≤6 tasks, clear scope → STANDARD ← change ≤6 to ≤2 +│ │ └── 2+ capabilities, OR architectural change, OR 7+ tasks → COMPLEX +``` + +**complexity-router.md lines 59-60 (STANDARD definition):** +``` +### STANDARD — Generate spec → parent issue in Draft → freeze to sub-issues + +**Definition**: A single capability with clear scope that decomposes into 2-6 tasks. ← change to 1-2 +``` + +**model-tendencies.md lines 24-26 (granularity mismatch):** +``` +- **Task granularity mismatch**: Either decomposes into too many tiny tasks (1 task per endpoint) or too few large tasks (1 task for entire feature). Target 2-6 tasks for STANDARD features. ← change to 1-2 +``` diff --git a/WORKFLOW.md b/WORKFLOW.md new file mode 100644 index 00000000..cd4ebd59 --- /dev/null +++ b/WORKFLOW.md @@ -0,0 +1,27 @@ +--- +tracker: + kind: linear + api_key: $LINEAR_API_KEY + project_slug: 1fa66498be91 +workspace: + root: /tmp/symphony_workspaces +polling: + interval_ms: 15000 +agent: + max_concurrent_agents: 1 + max_turns: 5 +codex: + command: codex app-server + approval_policy: never +server: + port: 4321 +--- + +You are implementing work for Linear issue {{ issue.identifier }}. + +Rules: +1. Implement only what the ticket asks for. +2. Keep changes scoped and safe. +3. Do not add secrets or credentials to the repository. 
+ +When finished, update the Linear issue state to "Done" using the `linear_graphql` tool. diff --git a/biome.json b/biome.json index 19a6477e..aff96c55 100644 --- a/biome.json +++ b/biome.json @@ -2,7 +2,7 @@ "$schema": "https://biomejs.dev/schemas/1.9.4/schema.json", "files": { "ignoreUnknown": true, - "ignore": ["dist/**", "node_modules/**"] + "ignore": ["dist/**", "node_modules/**", "pipeline-config/**"] }, "formatter": { "enabled": true, @@ -23,5 +23,17 @@ "semicolons": "always", "trailingCommas": "all" } - } + }, + "overrides": [ + { + "include": ["**/*.test.ts", "**/tests/**"], + "linter": { + "rules": { + "style": { + "noNonNullAssertion": "off" + } + } + } + } + ] } diff --git a/docs/conformance-test-matrix.md b/docs/conformance-test-matrix.md index 08fd7f31..08443cde 100644 --- a/docs/conformance-test-matrix.md +++ b/docs/conformance-test-matrix.md @@ -65,10 +65,13 @@ tool handling, and the optional `linear_graphql` dynamic tool extension. - `tests/logging/session-metrics.test.ts` - `tests/logging/runtime-snapshot.test.ts` - `tests/observability/dashboard-server.test.ts` +- `tests/orchestrator/runtime-host.test.ts` (poll_tick_completed event) Covered behaviors include operator-visible validation failures via runtime surfaces, structured log context fields, sink failure isolation, token and -rate-limit aggregation, and the operator dashboard APIs. +rate-limit aggregation, the operator dashboard APIs, and the `poll_tick_completed` +structured log event emitted after each successful poll tick (including +`dispatched_count`, `running_count`, `reconciled_stop_requests`, and `duration_ms`). 
## 17.7 CLI and Host Lifecycle diff --git a/linear_workpad.py b/linear_workpad.py new file mode 100644 index 00000000..3c8e1896 --- /dev/null +++ b/linear_workpad.py @@ -0,0 +1,139 @@ +#!/usr/bin/env python3 +import urllib.request +import urllib.error +import json +import os +import sys + +LINEAR_API_KEY = os.environ["LINEAR_API_KEY"] +ISSUE_ID = "7b4cc9a1-e014-4463-8cab-78bce7cfa7d0" + +WORKPAD_CONTENT = r"""## Workpad +**Environment**: pro14:/Users/ericlitman/intent/workspaces/architecture-build/repo/symphony-ts@8d4e5b7 + +### Plan +- [ ] **Step 1: Add `poll_tick_completed` to `ORCHESTRATOR_EVENTS` in `src/domain/model.ts`** + - Insert `"poll_tick_completed"` into the array after `"poll_tick"` + +- [ ] **Step 2: Add new log fields to `LOG_FIELDS` in `src/logging/fields.ts`** + - Add `"dispatched_count"`, `"running_count"`, `"reconciled_stop_requests"` to the `LOG_FIELDS` array + +- [ ] **Step 3: Extend `PollTickResult` in `src/orchestrator/core.ts` to include `runningCount`** + - `PollTickResult` already has `dispatchedIssueIds: string[]` and `stopRequests: StopRequest[]` + - Add `runningCount: number` field + - In all three return sites of `pollTick()`, set `runningCount: Object.keys(this.state.running).length` + - Note: `stopRequests` already provides reconciliation stop count, `dispatchedIssueIds.length` provides dispatch count + +- [ ] **Step 4: Add timing in `runPollCycle()` in `src/orchestrator/runtime-host.ts`** + - Before `runtimeHost.pollOnce()`, record `const tickStart = Date.now()` + - After `pollOnce()` returns, compute `durationMs = Date.now() - tickStart` + - Pass `durationMs` to `logPollCycleResult(logger, result, durationMs)` + +- [ ] **Step 5: Update `logPollCycleResult()` signature and body in `src/orchestrator/runtime-host.ts`** + - Add `durationMs: number` parameter + - After the existing warn/error checks, emit an info-level `poll_tick_completed` event: + ```typescript + await logger.info("poll_tick_completed", "Poll tick completed.", { + 
dispatched_count: result.dispatchedIssueIds.length, + running_count: result.runningCount, + reconciled_stop_requests: result.stopRequests.length, + duration_ms: durationMs, + }); + ``` + +- [ ] **Step 6: Add tests in `tests/orchestrator/runtime-host.test.ts`** + - New describe block for poll tick logging + - Test 1: `poll_tick_completed` event is logged after a successful poll (using `startRuntimeService`) + - Test 2: `dispatched_count` reflects the number of newly dispatched issues + - Verify `running_count` and `reconciled_stop_requests` fields are present and numeric + +### Acceptance Criteria +- [ ] `poll_tick_completed` in `ORCHESTRATOR_EVENTS` +- [ ] `dispatched_count`, `running_count`, `reconciled_stop_requests` in `LOG_FIELDS` +- [ ] `PollTickResult` has `runningCount: number` and all return sites populate it +- [ ] `logPollCycleResult` emits `poll_tick_completed` info event with all four fields +- [ ] `runPollCycle` times the `pollOnce()` call and passes duration +- [ ] Test: `poll_tick_completed` event is logged after successful poll +- [ ] Test: `dispatched_count` reflects newly dispatched issues +- [ ] All existing tests pass +- [ ] `npx tsc --noEmit` passes + +### Validation +- `pnpm test` +- `npx tsc --noEmit` +- `pnpm lint` + +### Notes +- 2026-03-20 Investigation complete. Plan posted. 
+- `PollTickResult.dispatchedIssueIds` is already `string[]` — use `.length` for `dispatched_count` +- `PollTickResult.stopRequests` is already `StopRequest[]` — use `.length` for `reconciled_stop_requests` +- `runningCount` must be added to `PollTickResult`; it is computed as `Object.keys(this.state.running).length` at the end of `pollTick()` in `core.ts` +- The `logPollCycleResult` function currently takes `(logger, result)` and uses `Awaited>` as the result type — need to add `durationMs: number` parameter +- `duration_ms` already exists in `LOG_FIELDS`, so no new field needed for it +- The three early-return paths in `pollTick()` must all include `runningCount` +""" + +def graphql(query, variables=None): + payload = json.dumps({"query": query, "variables": variables or {}}).encode("utf-8") + req = urllib.request.Request( + "https://api.linear.app/graphql", + data=payload, + headers={ + "Content-Type": "application/json", + "Authorization": LINEAR_API_KEY, + }, + method="POST", + ) + with urllib.request.urlopen(req) as resp: + return json.loads(resp.read()) + +# Step 1: Query existing comments +result = graphql(""" +query GetComments($issueId: String!) { + issue(id: $issueId) { + comments { + nodes { + id + body + } + } + } +} +""", {"issueId": ISSUE_ID}) + +print("Query result:", json.dumps(result, indent=2)) + +comments = result.get("data", {}).get("issue", {}).get("comments", {}).get("nodes", []) +existing = next((c for c in comments if "## Workpad" in c["body"]), None) + +if existing: + print(f"\nFound existing workpad comment: {existing['id']}") + update_result = graphql(""" +mutation UpdateComment($id: String!, $body: String!) 
{ + commentUpdate(id: $id, input: { body: $body }) { + success + comment { + id + } + } +} +""", {"id": existing["id"], "body": WORKPAD_CONTENT}) + print("Update result:", json.dumps(update_result, indent=2)) + print(f"\nACTION: updated") + print(f"COMMENT_ID: {existing['id']}") +else: + print("\nNo existing workpad comment found, creating new one...") + create_result = graphql(""" +mutation CreateComment($issueId: String!, $body: String!) { + commentCreate(input: { issueId: $issueId, body: $body }) { + success + comment { + id + } + } +} +""", {"issueId": ISSUE_ID, "body": WORKPAD_CONTENT}) + print("Create result:", json.dumps(create_result, indent=2)) + new_id = create_result.get("data", {}).get("commentCreate", {}).get("comment", {}).get("id") + print(f"\nACTION: created") + print(f"COMMENT_ID: {new_id}") diff --git a/ops/com.slack-bridge.plist b/ops/com.slack-bridge.plist new file mode 100644 index 00000000..614b4bf2 --- /dev/null +++ b/ops/com.slack-bridge.plist @@ -0,0 +1,64 @@ + + + + + + Label + com.slack-bridge + + ProgramArguments + + /opt/homebrew/bin/node + /path/to/symphony-ts/dist/src/cli/main.js + + + WorkingDirectory + /path/to/symphony-ts + + EnvironmentVariables + + PATH + /opt/homebrew/bin:/usr/local/bin:/usr/bin:/bin:/usr/sbin:/sbin + HOME + /Users/youruser + NODE_ENV + production + SLACK_BOT_TOKEN + xoxb-xxxxx + SLACK_SIGNING_SECRET + xxxxx + CHANNEL_PROJECT_MAP + {"C123":"/path/to/project"} + + + StandardOutPath + ~/Library/Logs/slack-bridge/stdout.log + + StandardErrorPath + ~/Library/Logs/slack-bridge/stderr.log + + RunAtLoad + + + KeepAlive + + SuccessfulExit + + + + ThrottleInterval + 60 + + SoftResourceLimits + + NumberOfFiles + 4096 + + + ProcessType + Background + + diff --git a/ops/com.symphony.example.plist b/ops/com.symphony.example.plist new file mode 100644 index 00000000..251f5552 --- /dev/null +++ b/ops/com.symphony.example.plist @@ -0,0 +1,68 @@ + + + + + + Label + com.symphony.example + + ProgramArguments + + 
/opt/homebrew/bin/node + /path/to/symphony-ts/dist/src/cli/main.js + /path/to/symphony-ts/WORKFLOW.md + --acknowledge-high-trust-preview + --logs-root + ~/Library/Logs/symphony/example/ + + + WorkingDirectory + /path/to/symphony-ts + + EnvironmentVariables + + PATH + /opt/homebrew/bin:/usr/local/bin:/usr/bin:/bin:/usr/sbin:/sbin + HOME + /Users/youruser + NODE_ENV + production + LINEAR_API_KEY + lin_api_xxxxx + LINEAR_PROJECT_SLUG + your-project-slug + REPO_URL + https://github.com/org/repo.git + + + StandardOutPath + ~/Library/Logs/symphony/example/stdout.log + + StandardErrorPath + ~/Library/Logs/symphony/example/stderr.log + + RunAtLoad + + + KeepAlive + + SuccessfulExit + + + + ThrottleInterval + 60 + + SoftResourceLimits + + NumberOfFiles + 4096 + + + ProcessType + Background + + diff --git a/ops/com.symphony.newsyslog.conf b/ops/com.symphony.newsyslog.conf new file mode 100644 index 00000000..c670f1af --- /dev/null +++ b/ops/com.symphony.newsyslog.conf @@ -0,0 +1,19 @@ +# DEPRECATED — superseded by `token-report.sh rotate` (SYMPH-131) +# +# This newsyslog config is no longer used. Log rotation for symphony logs +# is now handled by `token-report.sh rotate` / `token-report.mjs rotate`, +# which runs as part of the daily pipeline. The two cannot coexist because +# newsyslog renames/truncates without HWM coordination. 
+# +# To remove this config if previously installed: +# sudo rm /usr/local/etc/newsyslog.d/com.symphony.newsyslog.conf +# +# Original config (preserved for reference): +# Install: sudo cp ops/com.symphony.newsyslog.conf /etc/newsyslog.d/ +# +# Fields: logfile owner:group mode count size(KB) when flags +# - Rotates at 10MB, keeps 5 archives, compresses old logs (J = bzip2) +# - Wildcard (*) matches any project name under ~/Library/Logs/symphony/ + +/Users/*/Library/Logs/symphony/*/stdout.log : 644 5 10240 * J +/Users/*/Library/Logs/symphony/*/stderr.log : 644 5 10240 * J diff --git a/ops/com.symphony.report-server.plist b/ops/com.symphony.report-server.plist new file mode 100644 index 00000000..217a7253 --- /dev/null +++ b/ops/com.symphony.report-server.plist @@ -0,0 +1,54 @@ + + + + + + Label + com.symphony.report-server + + ProgramArguments + + /usr/bin/python3 + -m + http.server + 8090 + --directory + /Users/clawdilize/.symphony/reports + + + WorkingDirectory + /Users/clawdilize/.symphony/reports + + EnvironmentVariables + + PATH + /usr/bin:/usr/sbin:/bin:/sbin + HOME + /Users/clawdilize + + + StandardOutPath + /Users/clawdilize/.symphony/logs/report-server-stdout.log + + StandardErrorPath + /Users/clawdilize/.symphony/logs/report-server-stderr.log + + KeepAlive + + + ThrottleInterval + 10 + + ProcessType + Background + + diff --git a/ops/com.symphony.token-report.plist b/ops/com.symphony.token-report.plist new file mode 100644 index 00000000..72b834d6 --- /dev/null +++ b/ops/com.symphony.token-report.plist @@ -0,0 +1,61 @@ + + + + + + Label + com.symphony.token-report + + ProgramArguments + + /bin/bash + /Users/clawdilize/projects/symphony-ts/ops/token-report.sh + daily + + + WorkingDirectory + /Users/clawdilize/projects/symphony-ts + + StartCalendarInterval + + Hour + 6 + Minute + 0 + + + EnvironmentVariables + + PATH + /opt/homebrew/bin:/usr/local/bin:/usr/bin:/bin:/usr/sbin:/sbin + HOME + /Users/clawdilize + NODE_ENV + production + + + StandardOutPath + 
/Users/clawdilize/.symphony/logs/token-report-stdout.log + + StandardErrorPath + /Users/clawdilize/.symphony/logs/token-report-stderr.log + + KeepAlive + + + ProcessType + Background + + diff --git a/ops/slack-bridge-ctl b/ops/slack-bridge-ctl new file mode 100755 index 00000000..90140865 --- /dev/null +++ b/ops/slack-bridge-ctl @@ -0,0 +1,225 @@ +#!/usr/bin/env bash +set -euo pipefail + +# slack-bridge-ctl — manage the Slack bridge as a macOS launchd service +# Usage: slack-bridge-ctl {install|uninstall|start|stop|restart|status|logs|tail|cleanup} + +SCRIPT_DIR="$(cd "$(dirname "$(realpath "${BASH_SOURCE[0]}")")" && pwd)" +SYMPHONY_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)" + +# Defaults — override via environment or .env +SERVICE_LABEL="com.slack-bridge" +PLIST_PATH="$HOME/Library/LaunchAgents/${SERVICE_LABEL}.plist" +LOG_DIR="$HOME/Library/Logs/slack-bridge" +ENV_FILE="${SLACK_BRIDGE_ENV_FILE:-$SYMPHONY_ROOT/.env}" +NODE_BIN="${SLACK_BRIDGE_NODE:-$(which node 2>/dev/null || echo /opt/homebrew/bin/node)}" +CLI_JS="$SYMPHONY_ROOT/dist/src/slack-bot/server.js" + +# Colors (disabled if not a terminal) +if [[ -t 1 ]]; then + RED='\033[0;31m'; GREEN='\033[0;32m'; YELLOW='\033[0;33m'; CYAN='\033[0;36m'; NC='\033[0m' +else + RED=''; GREEN=''; YELLOW=''; CYAN=''; NC='' +fi + +info() { echo -e "${CYAN}▸${NC} $*"; } +ok() { echo -e "${GREEN}✓${NC} $*"; } +warn() { echo -e "${YELLOW}⚠${NC} $*" >&2; } +die() { echo -e "${RED}✗${NC} $*" >&2; exit 1; } + +# --- Precondition checks --- + +check_node() { + [[ -x "$NODE_BIN" ]] || die "Node not found at $NODE_BIN. Set SLACK_BRIDGE_NODE or install Node >= 22." +} + +check_built() { + [[ -f "$CLI_JS" ]] || die "Built CLI not found at $CLI_JS. Run 'pnpm build' in $SYMPHONY_ROOT first." +} + +check_env_file() { + [[ -f "$ENV_FILE" ]] || die ".env file not found at $ENV_FILE. Set SLACK_BRIDGE_ENV_FILE to override." +} + +check_not_installed() { + [[ ! -f "$PLIST_PATH" ]] || die "Service already installed at $PLIST_PATH. 
Run 'uninstall' first." +} + +check_installed() { + [[ -f "$PLIST_PATH" ]] || die "Service not installed. Run 'install' first." +} + +# --- .env → plist EnvironmentVariables --- + +generate_env_dict() { + local env_dict="" + while IFS= read -r line || [[ -n "$line" ]]; do + # Skip comments and blank lines + [[ -z "$line" || "$line" =~ ^[[:space:]]*# ]] && continue + line="${line#"${line%%[![:space:]]*}"}" + line="${line%"${line##*[![:space:]]}"}" + [[ -z "$line" ]] && continue + + local key="${line%%=*}" + local value="${line#*=}" + # Remove surrounding quotes from value + value="${value#\"}" ; value="${value%\"}" + value="${value#\'}" ; value="${value%\'}" + # Strip inline comments + value="${value%% \#*}" + + env_dict+=" ${key}"$'\n' + env_dict+=" ${value}"$'\n' + done < "$ENV_FILE" + echo "$env_dict" +} + +# --- Commands --- + +cmd_install() { + check_node + check_built + check_env_file + check_not_installed + + info "Installing $SERVICE_LABEL ..." + + mkdir -p "$LOG_DIR" + mkdir -p "$(dirname "$PLIST_PATH")" + + local env_dict + env_dict="$(generate_env_dict)" + + cat > "$PLIST_PATH" < + + + + Label + ${SERVICE_LABEL} + + ProgramArguments + + ${NODE_BIN} + ${CLI_JS} + + + WorkingDirectory + ${SYMPHONY_ROOT} + + EnvironmentVariables + + PATH + /opt/homebrew/bin:/usr/local/bin:/usr/bin:/bin:/usr/sbin:/sbin + HOME + ${HOME} + NODE_ENV + production +${env_dict} + + StandardOutPath + ${LOG_DIR}/stdout.log + + StandardErrorPath + ${LOG_DIR}/stderr.log + + RunAtLoad + + + KeepAlive + + SuccessfulExit + + + + ThrottleInterval + 60 + + SoftResourceLimits + + NumberOfFiles + 4096 + + + ProcessType + Background + + +PLIST + + ok "Plist written to $PLIST_PATH" + info "Run 'slack-bridge-ctl start' to start the service." +} + +cmd_uninstall() { + check_installed + cmd_stop 2>/dev/null || true + rm -f "$PLIST_PATH" + ok "Service uninstalled." +} + +cmd_start() { + check_installed + launchctl load "$PLIST_PATH" + ok "Service started." 
+} + +cmd_stop() { + check_installed + launchctl unload "$PLIST_PATH" 2>/dev/null || true + ok "Service stopped." +} + +cmd_restart() { + cmd_stop + cmd_start +} + +cmd_status() { + if launchctl list "$SERVICE_LABEL" &>/dev/null; then + ok "Service is running." + launchctl list "$SERVICE_LABEL" + else + warn "Service is not running." + fi +} + +cmd_logs() { + if [[ -f "$LOG_DIR/stdout.log" ]]; then + cat "$LOG_DIR/stdout.log" + else + warn "No stdout log found at $LOG_DIR/stdout.log" + fi + if [[ -f "$LOG_DIR/stderr.log" ]]; then + echo "--- stderr ---" + cat "$LOG_DIR/stderr.log" + fi +} + +cmd_tail() { + tail -f "$LOG_DIR/stdout.log" "$LOG_DIR/stderr.log" 2>/dev/null || die "No log files found in $LOG_DIR" +} + +cmd_cleanup() { + info "Cleaning up logs in $LOG_DIR ..." + rm -f "$LOG_DIR"/*.log + ok "Logs cleaned." +} + +# --- Main --- + +case "${1:-}" in + install) cmd_install ;; + uninstall) cmd_uninstall ;; + start) cmd_start ;; + stop) cmd_stop ;; + restart) cmd_restart ;; + status) cmd_status ;; + logs) cmd_logs ;; + tail) cmd_tail ;; + cleanup) cmd_cleanup ;; + *) + echo "Usage: $(basename "$0") {install|uninstall|start|stop|restart|status|logs|tail|cleanup}" + exit 1 + ;; +esac diff --git a/ops/symphony-ctl b/ops/symphony-ctl new file mode 100755 index 00000000..9b1f9c00 --- /dev/null +++ b/ops/symphony-ctl @@ -0,0 +1,1104 @@ +#!/usr/bin/env bash +set -euo pipefail + +# symphony-ctl — manage symphony-ts as a macOS launchd service +# Usage: symphony-ctl {install|uninstall|start|stop|restart|status|logs|tail|cleanup} + +SCRIPT_DIR="$(cd "$(dirname "$(realpath "${BASH_SOURCE[0]}")")" && pwd)" +SYMPHONY_ROOT="$(cd "$SCRIPT_DIR/.." 
&& pwd)" + +# Defaults — override via environment or .env +SYMPHONY_PROJECT="${SYMPHONY_PROJECT:-symphony}" +SERVICE_LABEL="com.symphony.${SYMPHONY_PROJECT}" +PLIST_PATH="$HOME/Library/LaunchAgents/${SERVICE_LABEL}.plist" +LOG_DIR="$HOME/Library/Logs/symphony/${SYMPHONY_PROJECT}" +ENV_FILE="${SYMPHONY_ENV_FILE:-$SYMPHONY_ROOT/.env}" +WORKFLOW_PATH="${SYMPHONY_WORKFLOW:-$SYMPHONY_ROOT/pipeline-config/workflows/WORKFLOW-${SYMPHONY_PROJECT}.md}" +NODE_BIN="${SYMPHONY_NODE:-$(which node 2>/dev/null || echo /opt/homebrew/bin/node)}" +CLI_JS="$SYMPHONY_ROOT/dist/src/cli/main.js" + +# Colors (disabled if not a terminal) +if [[ -t 1 ]]; then + RED='\033[0;31m'; GREEN='\033[0;32m'; YELLOW='\033[0;33m'; CYAN='\033[0;36m'; NC='\033[0m' +else + RED=''; GREEN=''; YELLOW=''; CYAN=''; NC='' +fi + +info() { echo -e "${CYAN}▸${NC} $*"; } +ok() { echo -e "${GREEN}✓${NC} $*"; } +warn() { echo -e "${YELLOW}⚠${NC} $*" >&2; } +die() { echo -e "${RED}✗${NC} $*" >&2; exit 1; } + +# --- Precondition checks --- + +check_node() { + [[ -x "$NODE_BIN" ]] || die "Node not found at $NODE_BIN. Set SYMPHONY_NODE or install Node >= 22." +} + +check_built() { + [[ -f "$CLI_JS" ]] || die "Built CLI not found at $CLI_JS. Run 'pnpm build' in $SYMPHONY_ROOT first." +} + +check_env_file() { + [[ -f "$ENV_FILE" ]] || die ".env file not found at $ENV_FILE. Set SYMPHONY_ENV_FILE to override." +} + +check_workflow() { + [[ -f "$WORKFLOW_PATH" ]] || die "WORKFLOW.md not found at $WORKFLOW_PATH. Set SYMPHONY_WORKFLOW to override." +} + +check_not_installed() { + [[ ! -f "$PLIST_PATH" ]] || die "Service already installed at $PLIST_PATH. Run 'uninstall' first." +} + +check_installed() { + [[ -f "$PLIST_PATH" ]] || die "Service not installed. Run 'install' first." 
+} + +# --- .env → plist EnvironmentVariables --- + +generate_env_dict() { + local env_dict="" + while IFS= read -r line || [[ -n "$line" ]]; do + # Skip comments and blank lines + [[ -z "$line" || "$line" =~ ^[[:space:]]*# ]] && continue + # Trim leading/trailing whitespace (bash-native, preserves quotes) + line="${line#"${line%%[![:space:]]*}"}" + line="${line%"${line##*[![:space:]]}"}" + [[ -z "$line" ]] && continue + + local key="${line%%=*}" + local value="${line#*=}" + # Remove surrounding quotes from value + value="${value#\"}" ; value="${value%\"}" + value="${value#\'}" ; value="${value%\'}" + # Strip inline comments (only unquoted: space then #) + # Only strip if value was not quoted (quotes already removed above) + value="${value%% \#*}" + + env_dict+=" ${key}"$'\n' + env_dict+=" ${value}"$'\n' + done < "$ENV_FILE" + echo "$env_dict" +} + +# --- plist generation --- + +generate_plist() { + local env_dict + env_dict="$(generate_env_dict)" + + cat < + + + + Label + ${SERVICE_LABEL} + + ProgramArguments + + ${NODE_BIN} + ${CLI_JS} + ${WORKFLOW_PATH} + --acknowledge-high-trust-preview + --logs-root + ${LOG_DIR} + + + WorkingDirectory + ${SYMPHONY_ROOT} + + EnvironmentVariables + + PATH + /opt/homebrew/bin:/usr/local/bin:/usr/bin:/bin:/usr/sbin:/sbin + HOME + ${HOME} + NODE_ENV + production +${env_dict} + + StandardOutPath + ${LOG_DIR}/stdout.log + + StandardErrorPath + ${LOG_DIR}/stderr.log + + RunAtLoad + + + KeepAlive + + SuccessfulExit + + + + ThrottleInterval + 60 + + SoftResourceLimits + + NumberOfFiles + 4096 + + + ProcessType + Background + + +PLIST +} + +# --- Commands --- + +cmd_install() { + check_node + check_built + check_env_file + check_workflow + check_not_installed + + mkdir -p "$LOG_DIR" + mkdir -p "$(dirname "$PLIST_PATH")" + + generate_plist > "$PLIST_PATH" + ok "Plist written to $PLIST_PATH" + + launchctl bootstrap "gui/$(id -u)" "$PLIST_PATH" + ok "Service registered: $SERVICE_LABEL" + info "Run 'symphony-ctl start' to begin polling." 
+} + +cmd_uninstall() { + check_installed + + # Stop first if running + if launchctl print "gui/$(id -u)/${SERVICE_LABEL}" &>/dev/null; then + launchctl kill SIGTERM "gui/$(id -u)/${SERVICE_LABEL}" 2>/dev/null || true + sleep 1 + fi + + launchctl bootout "gui/$(id -u)/${SERVICE_LABEL}" 2>/dev/null || true + rm -f "$PLIST_PATH" + ok "Service uninstalled: $SERVICE_LABEL" +} + +cmd_start() { + check_installed + + if is_running; then + warn "Service is already running." + return 0 + fi + + launchctl kickstart "gui/$(id -u)/${SERVICE_LABEL}" + ok "Service started: $SERVICE_LABEL" + info "Dashboard: http://localhost:$(get_port)" + info "Logs: $LOG_DIR/" +} + +cmd_stop() { + check_installed + + if ! is_running; then + warn "Service is not running." + return 0 + fi + + launchctl kill SIGTERM "gui/$(id -u)/${SERVICE_LABEL}" + ok "Service stopped: $SERVICE_LABEL" +} + +cmd_restart() { + check_installed + + if is_running; then + cmd_stop + sleep 1 + fi + cmd_start +} + +cmd_status() { + if [[ ! -f "$PLIST_PATH" ]]; then + info "Service not installed." + return 0 + fi + + echo "" + info "Service: $SERVICE_LABEL" + info "Plist: $PLIST_PATH" + info "Workflow: $WORKFLOW_PATH" + info "Logs: $LOG_DIR/" + info "Dashboard: http://localhost:$(get_port)" + echo "" + + if is_running; then + local pid + pid="$(get_pid)" + ok "Running (PID ${pid:-unknown})" + else + warn "Not running" + fi + + # Show last few lines of stderr if available + if [[ -f "$LOG_DIR/stderr.log" ]]; then + local size + size="$(wc -c < "$LOG_DIR/stderr.log" | tr -d ' ')" + if [[ "$size" -gt 0 ]]; then + echo "" + info "Last 5 lines of stderr.log:" + tail -5 "$LOG_DIR/stderr.log" | sed 's/^/ /' + fi + fi +} + +cmd_logs() { + if [[ ! -d "$LOG_DIR" ]]; then + die "Log directory not found: $LOG_DIR" + fi + + local log_file="${1:-stderr}" + local log_path="$LOG_DIR/${log_file}.log" + + [[ -f "$log_path" ]] || die "Log file not found: $log_path" + less +G "$log_path" +} + +cmd_tail() { + if [[ ! 
-d "$LOG_DIR" ]]; then + die "Log directory not found: $LOG_DIR" + fi + + info "Tailing stderr.log (Ctrl-C to stop)..." + tail -f "$LOG_DIR/stderr.log" "$LOG_DIR/stdout.log" 2>/dev/null +} + +# --- Helpers --- + +is_running() { + launchctl print "gui/$(id -u)/${SERVICE_LABEL}" &>/dev/null && \ + launchctl print "gui/$(id -u)/${SERVICE_LABEL}" 2>/dev/null | grep -q 'pid = [0-9]' +} + +get_pid() { + launchctl print "gui/$(id -u)/${SERVICE_LABEL}" 2>/dev/null | grep -oE 'pid = [0-9]+' | grep -oE '[0-9]+' +} + +get_port() { + # Extract port from WORKFLOW frontmatter + if [[ -f "$WORKFLOW_PATH" ]]; then + local port + port="$(sed -n '/^---$/,/^---$/p' "$WORKFLOW_PATH" | grep -E '^\s*port:' | head -1 | awk '{print $2}')" + echo "${port:-4321}" + else + echo "4321" + fi +} + +# --- Log rotation --- + +cmd_install_logrotate() { + local conf_src="$SCRIPT_DIR/com.symphony.newsyslog.conf" + local conf_dest="/etc/newsyslog.d/com.symphony.newsyslog.conf" + + [[ -f "$conf_src" ]] || die "newsyslog config not found at $conf_src" + + info "Installing log rotation config to $conf_dest" + sudo cp "$conf_src" "$conf_dest" + ok "Log rotation installed. Logs rotate at 10MB, keep 5 archives." + info "newsyslog checks this automatically — no restart needed." 
+} + +# --- Cleanup --- + +cmd_cleanup() { + local execute=false + local skip_github=false + local skip_linear=false + local prune_branches=false + + # Parse flags + while [[ $# -gt 0 ]]; do + case "$1" in + --execute) execute=true ;; + --skip-github) skip_github=true ;; + --skip-linear) skip_linear=true ;; + --prune-branches) prune_branches=true ;; + *) die "Unknown flag: $1" ;; + esac + shift + done + + # Load env if not already set + if [[ -f "$ENV_FILE" ]]; then + while IFS= read -r line || [[ -n "$line" ]]; do + [[ -z "$line" || "$line" =~ ^[[:space:]]*# ]] && continue + line="${line#"${line%%[![:space:]]*}"}" + line="${line%"${line##*[![:space:]]}"}" + [[ -z "$line" ]] && continue + local key="${line%%=*}" + local value="${line#*=}" + value="${value#\"}" ; value="${value%\"}" + value="${value#\'}" ; value="${value%\'}" + value="${value%% \#*}" + # Only export if not already set + if [[ -z "${!key:-}" ]]; then + export "$key=$value" + fi + done < "$ENV_FILE" + fi + + check_workflow + + # Parse WORKFLOW frontmatter for project_slug and workspace root + local frontmatter + frontmatter="$(sed -n '/^---$/,/^---$/p' "$WORKFLOW_PATH")" + + local project_slug + project_slug="$(echo "$frontmatter" | grep -E '^\s*project_slug:' | head -1 | awk '{print $2}')" + [[ -n "$project_slug" ]] || die "Could not parse project_slug from $WORKFLOW_PATH frontmatter" + + local workspace_root_raw + workspace_root_raw="$(echo "$frontmatter" | grep -E '^\s*root:' | head -1 | awk '{print $2}')" + [[ -n "$workspace_root_raw" ]] || die "Could not parse workspace.root from $WORKFLOW_PATH frontmatter" + + # Resolve workspace root relative to WORKFLOW_PATH directory + local workflow_dir + workflow_dir="$(cd "$(dirname "$WORKFLOW_PATH")" && pwd)" + local workspace_root + if [[ "$workspace_root_raw" == /* ]]; then + workspace_root="$workspace_root_raw" + else + workspace_root="$workflow_dir/$workspace_root_raw" + fi + + # Extract GitHub owner/repo from REPO_URL + local github_repo="" + if [[ 
-n "${REPO_URL:-}" ]]; then + github_repo="$(echo "$REPO_URL" | sed -E 's|^https?://github\.com/||; s|\.git$||')" + fi + + local dashboard_port + dashboard_port="$(get_port)" + + if $execute; then + info "symphony-ctl cleanup [EXECUTING]" + else + info "symphony-ctl cleanup [DRY RUN]" + fi + echo "" + + local count_workspaces=0 + local count_prs=0 + local count_logs=0 + local count_stale_issues=0 + + # --- 1. Local workspaces --- + info "Local workspaces:" + if [[ -d "$workspace_root" ]]; then + local has_workspaces=false + for dir in "$workspace_root"/*/; do + [[ -d "$dir" ]] || continue + local uuid + uuid="$(basename "$dir")" + # Only process UUID-shaped directories + [[ "$uuid" =~ ^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$ ]] || continue + has_workspaces=true + + if $skip_linear || [[ -z "${LINEAR_API_KEY:-}" ]]; then + warn " $uuid — skipped (Linear not available)" + continue + fi + + # Query Linear for the issue state + local response + response="$(curl -s -X POST https://api.linear.app/graphql \ + -H "Content-Type: application/json" \ + -H "Authorization: $LINEAR_API_KEY" \ + -d "{\"query\":\"{ issue(id: \\\"$uuid\\\") { identifier title state { name type } } }\"}" 2>/dev/null)" || true + + local issue_id issue_state state_type + issue_id="$(echo "$response" | grep -o '"identifier":"[^"]*"' | head -1 | cut -d'"' -f4)" + issue_state="$(echo "$response" | grep -o '"name":"[^"]*"' | head -1 | cut -d'"' -f4)" + state_type="$(echo "$response" | grep -o '"type":"[^"]*"' | head -1 | cut -d'"' -f4)" + + if [[ -z "$issue_id" ]]; then + # Issue not found in Linear + if $execute; then + rm -rf "$dir" + ok " $uuid (issue not found) — removed" + else + ok " $uuid (issue not found) — would remove" + fi + count_workspaces=$((count_workspaces + 1)) + elif [[ "$state_type" == "completed" || "$state_type" == "canceled" || "$state_type" == "cancelled" ]]; then + if $execute; then + rm -rf "$dir" + ok " $uuid ($issue_id, $issue_state) — removed" + else + ok 
" $uuid ($issue_id, $issue_state) — would remove" + fi + count_workspaces=$((count_workspaces + 1)) + else + info " $uuid ($issue_id, $issue_state) — active, keeping" + fi + done + if ! $has_workspaces; then + info " (none found)" + fi + else + info " (workspace root not found: $workspace_root)" + fi + echo "" + + # --- 2. Orphaned PRs --- + info "Orphaned PRs:" + if $skip_github; then + warn " skipped (--skip-github)" + elif [[ -z "$github_repo" ]]; then + warn " skipped (REPO_URL not set)" + elif ! command -v gh &>/dev/null; then + warn " skipped (gh CLI not found)" + else + local pr_json + pr_json="$(gh pr list --repo "$github_repo" --state open --json number,title,headRefName 2>/dev/null)" || { + warn " skipped (gh pr list failed)" + pr_json="[]" + } + + # Use gh's --jq to extract tab-delimited fields (avoids fragile JSON parsing) + local pr_lines + pr_lines="$(gh pr list --repo "$github_repo" --state open \ + --json number,title,headRefName \ + --jq '.[] | [.number, .headRefName, .title] | @tsv' 2>/dev/null)" || { + warn " skipped (gh pr list failed)" + pr_lines="" + } + + if [[ -z "$pr_lines" ]]; then + info " (no open PRs)" + else + while IFS=$'\t' read -r pr_number pr_branch pr_title; do + [[ -n "$pr_number" ]] || continue + + # Check if branch matches pipeline pattern (eric/mob-*) + if [[ "$pr_branch" =~ ^eric/mob- ]]; then + # Extract MOB identifier from title or branch + local mob_id + mob_id="$(echo "$pr_title" | grep -oE 'MOB-[0-9]+' | head -1)" + [[ -n "$mob_id" ]] || mob_id="$(echo "$pr_branch" | grep -oE 'mob-[0-9]+' | head -1 | tr '[:lower:]' '[:upper:]')" + + local should_close=false + local reason="" + + if ! 
$skip_linear && [[ -n "${LINEAR_API_KEY:-}" ]] && [[ -n "$mob_id" ]]; then + # Parse team key and number from identifier (e.g., MOB-16 → team=MOB, number=16) + local team_key issue_number + team_key="$(echo "$mob_id" | cut -d'-' -f1)" + issue_number="$(echo "$mob_id" | cut -d'-' -f2)" + + local pr_response + pr_response="$(curl -s -X POST https://api.linear.app/graphql \ + -H "Content-Type: application/json" \ + -H "Authorization: $LINEAR_API_KEY" \ + -d "{\"query\":\"{ issues(filter: { number: { eq: $issue_number }, team: { key: { eq: \\\"$team_key\\\" } } }) { nodes { identifier state { name type } } } }\"}" 2>/dev/null)" || true + + local pr_state_type + pr_state_type="$(echo "$pr_response" | grep -o '"type":"[^"]*"' | head -1 | cut -d'"' -f4)" + + if [[ "$pr_state_type" == "completed" || "$pr_state_type" == "canceled" || "$pr_state_type" == "cancelled" ]]; then + should_close=true + local pr_issue_state + pr_issue_state="$(echo "$pr_response" | grep -o '"name":"[^"]*"' | head -1 | cut -d'"' -f4)" + reason="$mob_id is $pr_issue_state" + fi + fi + + if $should_close; then + if $execute; then + gh pr close "$pr_number" --repo "$github_repo" --delete-branch 2>/dev/null && \ + ok " PR #$pr_number ($reason) — closed + branch deleted" || \ + warn " PR #$pr_number ($reason) — failed to close" + else + ok " PR #$pr_number ($reason) — would close + delete branch" + fi + count_prs=$((count_prs + 1)) + else + info " PR #$pr_number ($pr_branch) — issue still active, keeping" + fi + fi + done <<< "$pr_lines" + fi + fi + echo "" + + # --- 3. 
Stale "In Progress" issues --- + info "Stale issues:" + if $skip_linear || [[ -z "${LINEAR_API_KEY:-}" ]]; then + warn " skipped (Linear not available)" + else + local ip_response + ip_response="$(curl -s -X POST https://api.linear.app/graphql \ + -H "Content-Type: application/json" \ + -H "Authorization: $LINEAR_API_KEY" \ + -d "{\"query\":\"{ issues(filter: { project: { slugId: { eq: \\\"$project_slug\\\" } }, state: { name: { eq: \\\"In Progress\\\" } } }) { nodes { id identifier title state { name } } } }\"}" 2>/dev/null)" || true + + # Try to get dashboard state for cross-reference + local dashboard_state="" + local dashboard_available=false + dashboard_state="$(curl -s --connect-timeout 2 "http://localhost:$dashboard_port/api/v1/state" 2>/dev/null)" || true + if [[ -n "$dashboard_state" ]] && echo "$dashboard_state" | grep -q '"agents"' 2>/dev/null; then + dashboard_available=true + fi + + # Parse In Progress issues + local ip_issues + ip_issues="$(echo "$ip_response" | grep -o '"identifier":"[^"]*"' | cut -d'"' -f4 || true)" + + if [[ -z "$ip_issues" ]]; then + info " (no In Progress issues)" + else + local idx=0 + while IFS= read -r ident; do + local ip_title + # Extract corresponding title (nth occurrence) + ip_title="$(echo "$ip_response" | grep -o '"title":"[^"]*"' | sed -n "$((idx + 1))p" | cut -d'"' -f4)" + idx=$((idx + 1)) + + local has_worker=false + if $dashboard_available; then + # Check if issue identifier appears in dashboard state + if echo "$dashboard_state" | grep -q "$ident" 2>/dev/null; then + has_worker=true + fi + fi + + if $has_worker; then + info " $ident \"$ip_title\" — In Progress, worker active" + elif $dashboard_available; then + warn " $ident \"$ip_title\" — In Progress, no active worker" + count_stale_issues=$((count_stale_issues + 1)) + else + warn " $ident \"$ip_title\" — In Progress (dashboard unreachable, cannot verify worker)" + count_stale_issues=$((count_stale_issues + 1)) + fi + done <<< "$ip_issues" + fi + fi + echo "" + + 
# --- 4. Log files --- + info "Log files:" + local has_logs=false + while IFS= read -r logfile; do + [[ -f "$logfile" ]] || continue + has_logs=true + + # Check age — find files older than 7 days + if [[ "$(uname)" == "Darwin" ]]; then + local file_age_days + local file_mod + file_mod="$(stat -f %m "$logfile")" + local now + now="$(date +%s)" + file_age_days=$(( (now - file_mod) / 86400 )) + else + local file_age_days=0 + if find "$logfile" -mtime +7 -print | grep -q .; then + file_age_days=8 + fi + fi + + if [[ "$file_age_days" -ge 7 ]]; then + if $execute; then + rm -f "$logfile" + ok " $logfile (${file_age_days}d old) — removed" + else + ok " $logfile (${file_age_days}d old) — would remove" + fi + count_logs=$((count_logs + 1)) + else + info " $logfile (${file_age_days}d old) — recent, keeping" + fi + done < <(find "$LOG_DIR" -name "*.log" -o -name "*.jsonl" 2>/dev/null) + if ! $has_logs; then + info " (none found)" + fi + echo "" + + # --- Prune branches (optional phase) --- + local count_branches=0 + if $prune_branches; then + info "=== Prune Branches ===" + _do_prune_branches "$execute" + count_branches="$_PRUNE_BRANCHES_COUNT" + fi + + # --- Summary --- + info "Summary: $count_workspaces workspaces, $count_prs PRs, $count_stale_issues stale issues, $count_logs logs, $count_branches branches" + if ! $execute; then + info "Run with --execute to apply." + fi +} + +# --- Prune Branches --- + +# Shared implementation: fetch --prune, list merged branches, optionally delete. +# Sets global _PRUNE_BRANCHES_COUNT to the number of branches acted on. +# Arguments: execute (true|false) +_do_prune_branches() { + local execute="${1:-false}" + _PRUNE_BRANCHES_COUNT=0 + + # Step 1: fetch --prune to clean stale remote-tracking refs + info "Fetching and pruning stale remote-tracking refs..." 
+ git -C "$SYMPHONY_ROOT" fetch --prune + echo "" + + # Step 2: resolve the currently checked-out branch so we can skip it + local current_branch + current_branch="$(git -C "$SYMPHONY_ROOT" rev-parse --abbrev-ref HEAD 2>/dev/null || echo "")" + + # Step 3: list local branches fully merged into main + info "Local branches merged into main (skipping main, master, current):" + local found=false + while IFS= read -r raw_branch; do + # Strip leading whitespace and the "* " current-branch marker + local branch + branch="$(echo "$raw_branch" | xargs)" + branch="${branch#\* }" + [[ -z "$branch" ]] && continue + [[ "$branch" == "main" || "$branch" == "master" || "$branch" == "$current_branch" ]] && continue + found=true + if $execute; then + git -C "$SYMPHONY_ROOT" branch -d "$branch" + ok " $branch — deleted" + else + info " $branch — would delete" + fi + _PRUNE_BRANCHES_COUNT=$((_PRUNE_BRANCHES_COUNT + 1)) + done < <(git -C "$SYMPHONY_ROOT" branch --merged main 2>/dev/null) + + if ! $found; then + info " (none found)" + fi + echo "" +} + +cmd_prune_branches() { + local execute=false + + while [[ $# -gt 0 ]]; do + case "$1" in + --execute) execute=true ;; + --dry-run) execute=false ;; + *) die "Unknown flag: $1" ;; + esac + shift + done + + if $execute; then + info "symphony-ctl prune-branches [EXECUTING]" + else + info "symphony-ctl prune-branches [DRY RUN]" + fi + echo "" + + _do_prune_branches "$execute" + info "Summary: $_PRUNE_BRANCHES_COUNT merged branch(es)" + if ! $execute; then + info "Run with --execute to apply." + fi +} + +# --- Analyze --- + +# Format milliseconds into a human-readable duration string. 
+_fmt_duration() {
+  local ms="${1:-0}"
+  ms="${ms%%.*}"  # strip any decimal fraction jq may emit
+  ms="${ms:-0}"   # an input like ".5" is empty after the strip — treat as 0
+  local s=$((ms / 1000))
+  local m=$((s / 60))
+  local h=$((m / 60))
+  # Coarsest readable unit: <1s → ms, <1min → s, <1h → "Xm Ys", else "Xh Ym".
+  if [[ $ms -lt 1000 ]]; then echo "${ms}ms"
+  elif [[ $s -lt 60 ]]; then echo "${s}s"
+  elif [[ $m -lt 60 ]]; then printf "%dm %ds" "$m" "$((s % 60))"
+  else printf "%dh %dm" "$h" "$((m % 60))"
+  fi
+}
+
+# Compute all analysis aggregates and return a JSON object via stdout.
+# Arguments: stage_json turn_json log_path
+#   stage_json — JSON array of per-stage records; fields read below:
+#                total/input/output/cache_read/cache_write tokens,
+#                duration_ms, turn_count, outcome, issue_identifier, stage_name
+#   turn_json  — JSON array of turn_completed records
+#   log_path   — carried into the report verbatim for display
+# Aggregation is one jq program: `// 0` defaults guard missing fields and
+# safe_div guards division by zero (empty runs, zero-length groups).
+_analyze_compute() {
+  local stages="$1" turns="$2" log_path="$3"
+
+  jq -n \
+    --argjson stages "$stages" \
+    --argjson turns "$turns" \
+    --arg log_path "$log_path" \
+    '
+    def safe_div(a; b): if b == 0 then 0.0 else (a / b) end;
+
+    ($stages | length) as $stage_count |
+    ($turns | length) as $turn_event_count |
+    ($stages | [.[].total_tokens // 0] | add // 0) as $total_tokens |
+    ($stages | [.[].input_tokens // 0] | add // 0) as $total_input |
+    ($stages | [.[].output_tokens // 0] | add // 0) as $total_output |
+    ($stages | [.[].cache_read_tokens // 0] | add // 0) as $total_cache_read |
+    ($stages | [.[].cache_write_tokens // 0] | add // 0) as $total_cache_write |
+    ($stages | [.[].duration_ms // 0] | add // 0) as $total_duration |
+    ($stages | [.[].turn_count // 0] | add // 0) as $total_turns |
+    ($stages | [.[] | select(.outcome == "completed")] | length) as $completed |
+    ($stages | [.[] | select(.outcome == "failed")] | length) as $failed |
+    ($stages | [.[] | .issue_identifier // "unknown"] | unique | length) as $issue_count |
+
+    safe_div($total_tokens; $stage_count) as $avg_tokens |
+    safe_div($total_turns; $stage_count) as $avg_turns |
+    # cache hit % = cache reads as a share of all input-side tokens
+    safe_div($total_cache_read * 100; ($total_input + $total_cache_read)) as $cache_hit_pct |
+
+    # Per-issue aggregates (sorted by total tokens desc)
+    ($stages | group_by(.issue_identifier // "unknown") | map({
+      issue: (.[0].issue_identifier // "unknown"),
+      stages: length,
+      turns: ([.[].turn_count // 0] | add // 0),
total_tokens: ([.[].total_tokens // 0] | add // 0), + input_tokens: ([.[].input_tokens // 0] | add // 0), + output_tokens: ([.[].output_tokens // 0] | add // 0), + cache_read_tokens: ([.[].cache_read_tokens // 0] | add // 0), + duration_ms: ([.[].duration_ms // 0] | add // 0), + completed: ([.[] | select(.outcome == "completed")] | length), + failed: ([.[] | select(.outcome == "failed")] | length) + }) | sort_by(-.total_tokens)) as $per_issue | + + # Per-stage-name averages (sorted alphabetically by stage name) + ($stages | group_by(.stage_name // "unknown") | map({ + stage: (.[0].stage_name // "unknown"), + count: length, + avg_turns: safe_div([.[].turn_count // 0] | add // 0; length), + avg_tokens: safe_div([.[].total_tokens // 0] | add // 0; length), + avg_cache_read: safe_div([.[].cache_read_tokens // 0] | add // 0; length), + avg_duration_ms: safe_div([.[].duration_ms // 0] | add // 0; length), + completed: ([.[] | select(.outcome == "completed")] | length), + failed: ([.[] | select(.outcome == "failed")] | length) + }) | sort_by(.stage)) as $per_stage | + + # Per-turn stats from turn_completed events + ($turns | { + count: length, + avg_tokens: safe_div([.[].total_tokens // 0] | add // 0; length), + avg_input: safe_div([.[].input_tokens // 0] | add // 0; length), + avg_cache_read: safe_div([.[].cache_read_tokens // 0] | add // 0; length) + }) as $per_turn | + + # Outliers: stages where total_tokens or turn_count > 2x overall average + ([$stages[] | + select( + (($avg_tokens > 0) and ((.total_tokens // 0) > ($avg_tokens * 2))) or + (($avg_turns > 0) and ((.turn_count // 0) > ($avg_turns * 2))) + ) | + { + issue: (.issue_identifier // "unknown"), + stage: (.stage_name // "unknown"), + total_tokens: (.total_tokens // 0), + turn_count: (.turn_count // 0), + token_ratio: (if $avg_tokens > 0 then ((.total_tokens // 0) / $avg_tokens) else 0 end), + turn_ratio: (if $avg_turns > 0 then ((.turn_count // 0) / $avg_turns ) else 0 end) + } + ]) as $outliers | + + { + 
log_path: $log_path, + summary: { + stage_count: $stage_count, + turn_event_count: $turn_event_count, + issue_count: $issue_count, + total_tokens: $total_tokens, + total_input_tokens: $total_input, + total_output_tokens: $total_output, + total_cache_read_tokens: $total_cache_read, + total_cache_write_tokens: $total_cache_write, + total_turns: $total_turns, + total_duration_ms: $total_duration, + completed: $completed, + failed: $failed, + avg_tokens_per_stage: $avg_tokens, + avg_turns_per_stage: $avg_turns, + cache_hit_pct: $cache_hit_pct + }, + per_issue: $per_issue, + per_stage: $per_stage, + per_turn: $per_turn, + outliers: $outliers + } + ' +} + +# Print a human-readable report from the JSON produced by _analyze_compute. +_analyze_print() { + local report="$1" + + local BAR="════════════════════════════════════════════════════════════" + local SEP="────────────────────────────────────────────────────────────" + + # --- Header --- + local log_path + log_path="$(echo "$report" | jq -r '.log_path')" + echo "" + echo "$BAR" + echo " SYMPHONY RUN ANALYSIS" + printf " Log: %s\n" "$log_path" + echo "$BAR" + echo "" + + # --- Run Summary --- + local stage_count issue_count completed failed + local total_tokens total_input total_output total_cache_read + local cache_hit_pct total_dur_ms avg_turns avg_tokens turn_event_count + stage_count=$( echo "$report" | jq -r '.summary.stage_count') + issue_count=$( echo "$report" | jq -r '.summary.issue_count') + completed=$( echo "$report" | jq -r '.summary.completed') + failed=$( echo "$report" | jq -r '.summary.failed') + total_tokens=$( echo "$report" | jq -r '.summary.total_tokens') + total_input=$( echo "$report" | jq -r '.summary.total_input_tokens') + total_output=$( echo "$report" | jq -r '.summary.total_output_tokens') + total_cache_read=$( echo "$report" | jq -r '.summary.total_cache_read_tokens') + cache_hit_pct=$( echo "$report" | jq -r '.summary.cache_hit_pct | . 
* 10 | round / 10') + total_dur_ms=$( echo "$report" | jq -r '.summary.total_duration_ms') + avg_turns=$( echo "$report" | jq -r '.summary.avg_turns_per_stage | . * 10 | round / 10') + avg_tokens=$( echo "$report" | jq -r '.summary.avg_tokens_per_stage | round') + turn_event_count=$( echo "$report" | jq -r '.summary.turn_event_count') + + echo "Run Summary" + echo "$SEP" + printf " Stages: %d (%d completed, %d failed)\n" "$stage_count" "$completed" "$failed" + printf " Issues: %d\n" "$issue_count" + printf " Turns logged: %d\n" "$turn_event_count" + printf " Total time: %s\n" "$(_fmt_duration "$total_dur_ms")" + printf " Tokens: %d total\n" "$total_tokens" + printf " Input: %d\n" "$total_input" + printf " Output: %d\n" "$total_output" + printf " Cache hit: %d (%.1f%% of input)\n" "$total_cache_read" "$cache_hit_pct" + printf " Avg/stage: %.1f turns, %d tokens\n" "$avg_turns" "$avg_tokens" + echo "" + + # --- Per-Issue Table --- + local issue_row_count + issue_row_count=$(echo "$report" | jq -r '.per_issue | length') + + if [[ "$issue_row_count" -gt 0 ]]; then + echo "Per-Issue Summary" + echo "$SEP" + printf " %-14s %6s %5s %10s %10s %s\n" \ + "ISSUE" "STAGES" "TURNS" "TOKENS" "DURATION" "STATUS" + while IFS=$'\t' read -r issue stages turns tokens dur status; do + printf " %-14s %6s %5s %10d %10s %s\n" \ + "$issue" "$stages" "$turns" "$tokens" "$(_fmt_duration "$dur")" "$status" + done < <(echo "$report" | jq -r ' + .per_issue[] | + [ + (.issue // "unknown"), + (.stages | tostring), + (.turns | tostring), + (.total_tokens | tostring), + (.duration_ms | tostring), + (if .failed > 0 then "FAILED(\(.failed))" else "ok" end) + ] | @tsv + ') + echo "" + fi + + # --- Per-Stage Averages --- + local stage_row_count + stage_row_count=$(echo "$report" | jq -r '.per_stage | length') + + if [[ "$stage_row_count" -gt 0 ]]; then + echo "Per-Stage Averages" + echo "$SEP" + printf " %-14s %5s %9s %10s %10s %s\n" \ + "STAGE" "COUNT" "AVG TURNS" "AVG TOKENS" "AVG TIME" "OK/FAIL" + 
while IFS=$'\t' read -r stage count avg_t avg_tok avg_dur ok_fail; do + printf " %-14s %5s %9s %10d %10s %s\n" \ + "$stage" "$count" "$avg_t" "$avg_tok" "$(_fmt_duration "$avg_dur")" "$ok_fail" + done < <(echo "$report" | jq -r ' + .per_stage[] | + [ + (.stage // "unknown"), + (.count | tostring), + (.avg_turns | . * 10 | round / 10 | tostring), + (.avg_tokens | round | tostring), + (.avg_duration_ms | round | tostring), + "\(.completed)/\(.failed)" + ] | @tsv + ') + echo "" + fi + + # --- Per-Turn Granularity --- + local turn_count turn_avg_tokens turn_avg_input turn_avg_cache + turn_count=$( echo "$report" | jq -r '.per_turn.count') + turn_avg_tokens=$( echo "$report" | jq -r '.per_turn.avg_tokens | round') + turn_avg_input=$( echo "$report" | jq -r '.per_turn.avg_input | round') + turn_avg_cache=$( echo "$report" | jq -r '.per_turn.avg_cache_read | round') + + if [[ "$turn_count" -gt 0 ]]; then + echo "Per-Turn Granularity" + echo "$SEP" + printf " Turns observed: %d\n" "$turn_count" + printf " Avg tokens/turn: %d total (%d input, %d cache read)\n" \ + "$turn_avg_tokens" "$turn_avg_input" "$turn_avg_cache" + echo "" + fi + + # --- Cache Efficiency --- + echo "Cache Efficiency" + echo "$SEP" + printf " Overall: %.1f%% of input served from cache (%d tokens)\n" \ + "$cache_hit_pct" "$total_cache_read" + if [[ "$issue_row_count" -gt 0 ]]; then + while IFS=$'\t' read -r issue cr pct; do + printf " %-14s: %.1f%% cache hit (%d tokens)\n" "$issue" "$pct" "$cr" + done < <(echo "$report" | jq -r ' + .per_issue[] | + (if (.input_tokens + .cache_read_tokens) > 0 + then (.cache_read_tokens * 100 / (.input_tokens + .cache_read_tokens)) + else 0 end) as $pct | + [ + (.issue // "unknown"), + (.cache_read_tokens | tostring), + ($pct | . 
* 10 | round / 10 | tostring) + ] | @tsv + ') + fi + echo "" + + # --- Outlier Flags --- + local outlier_count + outlier_count=$(echo "$report" | jq -r '.outliers | length') + + echo "Outlier Flags" + echo "$SEP" + if [[ "$outlier_count" -gt 0 ]]; then + while IFS=$'\t' read -r issue stage tokens turns token_ratio turn_ratio; do + printf " %s / %s: %d tokens (%.1fx avg), %d turns (%.1fx avg)\n" \ + "$issue" "$stage" "$tokens" "$token_ratio" "$turns" "$turn_ratio" + done < <(echo "$report" | jq -r ' + .outliers[] | + [ + (.issue // "unknown"), + (.stage // "unknown"), + (.total_tokens | tostring), + (.turn_count | tostring), + (.token_ratio | . * 10 | round / 10 | tostring), + (.turn_ratio | . * 10 | round / 10 | tostring) + ] | @tsv + ') + else + echo " (none)" + fi + echo "" +} + +cmd_analyze() { + local json_output=false + local log_path="" + + # Parse flags and positional argument + while [[ $# -gt 0 ]]; do + case "$1" in + --json) + json_output=true ;; + -*) + die "Unknown flag: $1" ;; + *) + [[ -z "$log_path" ]] || die "Unexpected argument: $1" + log_path="$1" + ;; + esac + shift + done + + # Default: symphony.jsonl under $LOG_DIR + if [[ -z "$log_path" ]]; then + log_path="$LOG_DIR/symphony.jsonl" + [[ -f "$log_path" ]] || die "No symphony.jsonl found at $log_path. Pass a path explicitly." + fi + + [[ -f "$log_path" ]] || die "Log file not found: $log_path" + command -v jq &>/dev/null || die "jq is required for the analyze command. 
Install with: brew install jq" + + # Slurp only the event types we care about (ignore all others) + local stage_json turn_json + stage_json="$(jq -cs '[.[] | select(.event == "stage_completed")]' "$log_path" 2>/dev/null || echo '[]')" + turn_json="$(jq -cs '[.[] | select(.event == "turn_completed")]' "$log_path" 2>/dev/null || echo '[]')" + + # Compute aggregates + local report + report="$(_analyze_compute "$stage_json" "$turn_json" "$log_path")" + + if $json_output; then + echo "$report" + else + _analyze_print "$report" + fi +} + +# --- Main --- + +usage() { + cat < + +Commands: + install Register the launchd service (does not start it) + uninstall Stop and remove the launchd service + start Start the service + stop Stop the service + restart Stop and start the service + status Show service status and recent logs + logs Open full log in pager (default: stderr, pass 'stdout' for stdout) + tail Tail both stdout and stderr logs + install-logrotate Install newsyslog config for log rotation (requires sudo) + cleanup Detect stale pipeline artifacts (dry-run by default) + --execute Actually remove/close artifacts + --skip-github Skip PR/branch cleanup + --skip-linear Skip Linear API queries + --prune-branches Also run branch pruning phase + prune-branches Prune merged local branches (dry-run by default) + --execute Delete merged branches (git branch -d) + --dry-run Print what would be deleted (default) + analyze Analyze a JSONL run log and print a report + [path] Path to symphony.jsonl + (default: \$LOG_DIR/symphony.jsonl) + --json Output machine-readable JSON instead of text + +Environment: + SYMPHONY_PROJECT Project name for label/logs (default: symphony) + SYMPHONY_ENV_FILE Path to .env file (default: /.env) + SYMPHONY_WORKFLOW Path to WORKFLOW.md (default: pipeline-config/workflows/WORKFLOW-\$SYMPHONY_PROJECT.md) + SYMPHONY_NODE Path to node binary (default: auto-detected) + +EOF +} + +case "${1:-}" in + install) cmd_install ;; + uninstall) cmd_uninstall ;; + start) 
cmd_start ;; + stop) cmd_stop ;; + restart) cmd_restart ;; + status) cmd_status ;; + logs) cmd_logs "${2:-stderr}" ;; + tail) cmd_tail ;; + install-logrotate) cmd_install_logrotate ;; + cleanup) shift; cmd_cleanup "$@" ;; + prune-branches) shift; cmd_prune_branches "$@" ;; + analyze) shift; cmd_analyze "$@" ;; + -h|--help) usage ;; + --version|-V) "$NODE_BIN" "$CLI_JS" --version ;; + *) usage; exit 1 ;; +esac diff --git a/ops/symphony-deploy b/ops/symphony-deploy new file mode 100755 index 00000000..d662d2d8 --- /dev/null +++ b/ops/symphony-deploy @@ -0,0 +1,302 @@ +#!/usr/bin/env bash +set -euo pipefail + +# symphony-deploy — pull, build, and restart symphony-ts on the server +# Usage: symphony-deploy [--dry-run] [--no-restart] [--symphony] [--config] + +SCRIPT_DIR="$(cd "$(dirname "$(realpath "${BASH_SOURCE[0]}")")" && pwd)" +SYMPHONY_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)" + +# Defaults — override via environment +CLAUDE_CONFIG_DIR="${CLAUDE_CONFIG_DIR:-$HOME/projects/claude-config}" +CTL="$SCRIPT_DIR/symphony-ctl" +export SOPS_AGE_KEY_FILE="${SOPS_AGE_KEY_FILE:-$HOME/.config/sops/age/keys.txt}" + +# Colors (disabled if not a terminal) +if [[ -t 1 ]]; then + RED='\033[0;31m'; GREEN='\033[0;32m'; YELLOW='\033[0;33m'; CYAN='\033[0;36m'; NC='\033[0m' +else + RED=''; GREEN=''; YELLOW=''; CYAN=''; NC='' +fi + +info() { echo -e "${CYAN}▸${NC} $*"; } +ok() { echo -e "${GREEN}✓${NC} $*"; } +warn() { echo -e "${YELLOW}⚠${NC} $*" >&2; } +die() { echo -e "${RED}✗${NC} $*" >&2; exit 1; } + +# --- Helpers --- + +usage() { + cat </dev/null || echo "(unknown)" + else + echo "(symphony-ctl not found)" + fi +} + +# --- Pull a git repo --- +# Arguments: repo_path repo_label +# Sets globals: _PRE_SHA _POST_SHA _LOCKFILE_CHANGED _ENV_ENC_CHANGED +pull_repo() { + local repo_path="$1" + local label="$2" + + info "Pulling $label ($repo_path)..." 
+ _PRE_SHA="$(git -C "$repo_path" rev-parse HEAD)" + + if $DRY_RUN; then + info "[dry-run] git -C $repo_path pull --ff-only" + _POST_SHA="$_PRE_SHA" + _LOCKFILE_CHANGED=false + _ENV_ENC_CHANGED=false + return + fi + + git -C "$repo_path" pull --ff-only || die "git pull --ff-only failed in $repo_path. Branches may have diverged — resolve manually." + _POST_SHA="$(git -C "$repo_path" rev-parse HEAD)" + + if [[ "$_PRE_SHA" == "$_POST_SHA" ]]; then + ok "$label already up to date (${_PRE_SHA:0:8})" + _LOCKFILE_CHANGED=false + _ENV_ENC_CHANGED=false + return + fi + + ok "$label updated: ${_PRE_SHA:0:8} → ${_POST_SHA:0:8}" + + # Check what changed between old and new SHA + local changed_files + changed_files="$(git -C "$repo_path" diff --name-only "$_PRE_SHA" "$_POST_SHA")" + + _LOCKFILE_CHANGED=false + if echo "$changed_files" | grep -q '^pnpm-lock\.yaml$'; then + _LOCKFILE_CHANGED=true + fi + + _ENV_ENC_CHANGED=false + if echo "$changed_files" | grep -q '^\.env\.enc$'; then + _ENV_ENC_CHANGED=true + fi +} + +# --- Main --- + +if $DRY_RUN; then + info "symphony-deploy [DRY RUN]" +else + info "symphony-deploy" +fi +echo "" + +NEED_ENV_RESTART=false +SYMPHONY_UPDATED=false +SYMPH_PRE_SHA="" +SYMPH_POST_SHA="" + +# --- 1. Symphony-ts repo --- +if $DO_SYMPHONY; then + info "=== symphony-ts ===" + [[ -d "$SYMPHONY_ROOT/.git" ]] || die "Not a git repo: $SYMPHONY_ROOT" + + # Pre-deploy version + info "Pre-deploy version: $(get_version)" + + info "Ensuring symphony-ts is on main..." + run_or_dry git -C "$SYMPHONY_ROOT" checkout main + + pull_repo "$SYMPHONY_ROOT" "symphony-ts" + SYMPH_PRE_SHA="$_PRE_SHA" + SYMPH_POST_SHA="$_POST_SHA" + + if [[ "$SYMPH_PRE_SHA" != "$SYMPH_POST_SHA" ]]; then + SYMPHONY_UPDATED=true + + # pnpm install if lockfile changed + if $_LOCKFILE_CHANGED; then + info "pnpm-lock.yaml changed — installing dependencies..." 
+ run_or_dry pnpm install --frozen-lockfile --dir "$SYMPHONY_ROOT" + ok "Dependencies installed" + else + ok "pnpm-lock.yaml unchanged — skipping install" + fi + + # Always rebuild if code changed + info "Building..." + run_or_dry pnpm run --dir "$SYMPHONY_ROOT" build + ok "Build complete" + fi + + # Decrypt .env.enc if it's newer than .env + local_env="$SYMPHONY_ROOT/.env" + local_enc="$SYMPHONY_ROOT/.env.enc" + if [[ -f "$local_enc" ]]; then + if [[ ! -f "$local_env" ]] || [[ "$local_enc" -nt "$local_env" ]]; then + info ".env.enc is newer than .env — decrypting..." + if $DRY_RUN; then + info "[dry-run] sops --decrypt --input-type dotenv --output-type dotenv $local_enc > $local_env" + else + sops --decrypt --input-type dotenv --output-type dotenv "$local_enc" > "$local_env" + fi + ok ".env decrypted" + NEED_ENV_RESTART=true + else + ok ".env is current — skipping decrypt" + fi + fi + + echo "" +fi + +# --- 2. Claude-config repo --- +if $DO_CONFIG; then + info "=== claude-config ===" + + if [[ ! -d "$CLAUDE_CONFIG_DIR" ]]; then + warn "claude-config dir not found at $CLAUDE_CONFIG_DIR — skipping" + elif [[ ! -d "$CLAUDE_CONFIG_DIR/.git" ]]; then + warn "$CLAUDE_CONFIG_DIR is not a git repo — skipping" + else + pull_repo "$CLAUDE_CONFIG_DIR" "claude-config" + CONFIG_PRE_SHA="$_PRE_SHA" + CONFIG_POST_SHA="$_POST_SHA" + + # Apply config changes (symlinks skills, CLAUDE.md, RTK.md, merges settings.json) + if [[ -x "$CLAUDE_CONFIG_DIR/deploy.sh" ]]; then + info "Applying claude-config..." + run_or_dry "$CLAUDE_CONFIG_DIR/deploy.sh" + ok "claude-config applied" + fi + fi + + echo "" +fi + +# --- 3. Service restart --- +if ! $NO_RESTART && $DO_SYMPHONY; then + info "=== Service ===" + + if ! service_installed; then + info "Service not installed — skipping restart" + elif $NEED_ENV_RESTART; then + # .env changed — must uninstall/install to rebake env vars into plist + info ".env was refreshed — reinstalling service (plist bakes env vars)..." 
+ run_or_dry "$CTL" uninstall + run_or_dry "$CTL" install + run_or_dry "$CTL" start + ok "Service reinstalled and started" + elif $SYMPHONY_UPDATED; then + info "Code updated — restarting service..." + run_or_dry "$CTL" restart + ok "Service restarted" + else + ok "No changes — service left as-is" + fi + + echo "" +fi + +# --- 3b. Slack bridge restart --- +if ! $NO_RESTART && $DO_SYMPHONY; then + SLACK_CTL="$SCRIPT_DIR/slack-bridge-ctl" + SLACK_PLIST="$HOME/Library/LaunchAgents/com.slack-bridge.plist" + + if [[ ! -x "$SLACK_CTL" ]]; then + info "slack-bridge-ctl not found — skipping" + elif [[ ! -f "$SLACK_PLIST" ]]; then + info "Slack bridge not installed — skipping" + elif $NEED_ENV_RESTART; then + info ".env was refreshed — reinstalling slack bridge..." + run_or_dry "$SLACK_CTL" uninstall + run_or_dry "$SLACK_CTL" install + run_or_dry "$SLACK_CTL" start + ok "Slack bridge reinstalled and started" + elif $SYMPHONY_UPDATED; then + info "Code updated — restarting slack bridge..." + run_or_dry "$SLACK_CTL" restart + ok "Slack bridge restarted" + else + ok "No changes — slack bridge left as-is" + fi + + echo "" +fi + +# --- 4. Summary --- +info "=== Summary ===" + +if $DO_SYMPHONY; then + printf " symphony-ts: %s → %s\n" "${SYMPH_PRE_SHA:0:8}" "${SYMPH_POST_SHA:0:8}" + info "Post-deploy version: $(get_version)" +fi + +if $DO_CONFIG; then + if [[ -n "${CONFIG_PRE_SHA:-}" ]]; then + printf " claude-config: %s → %s\n" "${CONFIG_PRE_SHA:0:8}" "${CONFIG_POST_SHA:0:8}" + else + printf " claude-config: (skipped)\n" + fi +fi + +if $DRY_RUN; then + echo "" + info "Dry run complete — no changes were made." 
+fi diff --git a/ops/symphony-onboard b/ops/symphony-onboard new file mode 100755 index 00000000..9257091c --- /dev/null +++ b/ops/symphony-onboard @@ -0,0 +1,552 @@ +#!/usr/bin/env bash +set -euo pipefail + +# symphony-onboard — onboard a new project into the Symphony pipeline +# Usage: symphony-onboard --product --team-key --team-name --description [--repo ] [--dry-run] + +SCRIPT_DIR="$(cd "$(dirname "$(realpath "${BASH_SOURCE[0]}")")" && pwd)" +SYMPHONY_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)" + +TEMPLATE_DIR="$SYMPHONY_ROOT/pipeline-config/templates" +WORKFLOW_DIR="$SYMPHONY_ROOT/pipeline-config/workflows" +PORTS_FILE="$SYMPHONY_ROOT/pipeline-config/ports.json" +RUN_PIPELINE="$SYMPHONY_ROOT/run-pipeline.sh" + +# Colors (disabled if not a terminal) +if [[ -t 1 ]]; then + RED='\033[0;31m'; GREEN='\033[0;32m'; YELLOW='\033[0;33m'; CYAN='\033[0;36m'; NC='\033[0m' +else + RED=''; GREEN=''; YELLOW=''; CYAN=''; NC='' +fi + +info() { echo -e "${CYAN}▸${NC} $*"; } +ok() { echo -e "${GREEN}✓${NC} $*"; } +warn() { echo -e "${YELLOW}⚠${NC} $*" >&2; } +die() { echo -e "${RED}✗${NC} $*" >&2; exit 1; } + +# --- Temp directory cleanup --- + +TMPDIR_ONBOARD="" +cleanup() { + if [[ -n "$TMPDIR_ONBOARD" && -d "$TMPDIR_ONBOARD" ]]; then + rm -rf "$TMPDIR_ONBOARD" + fi +} +trap cleanup EXIT + +# --- Helpers --- + +usage() { + cat < Product name (e.g., "my-project") + --team-key Linear team key (e.g., "MYPROJ") + --team-name Linear team display name (e.g., "My Project Team") + --description Project description for Linear + +Optional flags: + --repo GitHub repository (default: mobilyze-llc/{product}) + --dry-run Show what each step would do without executing + -h, --help Show this help message + +Environment: + LINEAR_API_KEY Linear API token (required) + +Steps performed: + 1. Duplicate detection (check ports.json) + 2. Linear team creation (idempotent) + 3. Linear project creation + team linking (idempotent) + 4. Port auto-allocation (max+1 from ports.json) + 5. 
run-pipeline.sh auto-registration + 6. Generate WORKFLOW file from template + 7. Verify repo exists + 8. Generate CLAUDE.md in target repo + 9. Copy CI minimal workflow to target repo + 10. Set up GitHub merge queue via Rulesets API + 11. Summary + +EOF +} + +run_or_dry() { + if $DRY_RUN; then + info "[dry-run] $*" + else + "$@" + fi +} + +# --- Precondition checks --- + +check_gh_auth() { + gh auth status >/dev/null 2>&1 || die "GitHub CLI not authenticated. Run 'gh auth login' first." +} + +check_linear_key() { + [[ -n "${LINEAR_API_KEY:-}" ]] || die "LINEAR_API_KEY environment variable is not set." +} + +check_repo_exists() { + local repo="$1" + gh api "/repos/$repo" --silent >/dev/null 2>&1 || die "Repository '$repo' not found or not accessible." +} + +check_templates() { + [[ -f "$TEMPLATE_DIR/WORKFLOW-template.md" ]] || die "WORKFLOW template not found at $TEMPLATE_DIR/WORKFLOW-template.md" + [[ -f "$TEMPLATE_DIR/CLAUDE.md.tmpl" ]] || die "CLAUDE.md template not found at $TEMPLATE_DIR/CLAUDE.md.tmpl" + [[ -f "$TEMPLATE_DIR/ci-minimal.yml" ]] || die "CI minimal template not found at $TEMPLATE_DIR/ci-minimal.yml" +} + +check_jq() { + command -v jq &>/dev/null || die "jq is required but not found. Install it first." +} + +check_ports_file() { + [[ -f "$PORTS_FILE" ]] || die "ports.json not found at $PORTS_FILE" +} + +check_run_pipeline() { + [[ -f "$RUN_PIPELINE" ]] || die "run-pipeline.sh not found at $RUN_PIPELINE" +} + +check_linear_cli() { + command -v linear &>/dev/null || die "linear CLI is required but not found. Install it first." 
+} + +# --- Flags --- + +DRY_RUN=false +PRODUCT="" +TEAM_KEY="" +TEAM_NAME="" +DESCRIPTION="" +REPO="" + +while [[ $# -gt 0 ]]; do + case "$1" in + --dry-run) DRY_RUN=true ;; + --product) PRODUCT="${2:-}"; [[ -n "$PRODUCT" ]] || die "--product requires a value"; shift ;; + --team-key) TEAM_KEY="${2:-}"; [[ -n "$TEAM_KEY" ]] || die "--team-key requires a value"; shift ;; + --team-name) TEAM_NAME="${2:-}"; [[ -n "$TEAM_NAME" ]] || die "--team-name requires a value"; shift ;; + --description) DESCRIPTION="${2:-}"; [[ -n "$DESCRIPTION" ]] || die "--description requires a value"; shift ;; + --repo) REPO="${2:-}"; [[ -n "$REPO" ]] || die "--repo requires a value"; shift ;; + -h|--help) usage; exit 0 ;; + *) die "Unknown flag: $1" ;; + esac + shift +done + +# Validate required flags +[[ -n "$PRODUCT" ]] || die "Missing required flag: --product" +[[ -n "$TEAM_KEY" ]] || die "Missing required flag: --team-key" +[[ -n "$TEAM_NAME" ]] || die "Missing required flag: --team-name" +[[ -n "$DESCRIPTION" ]] || die "Missing required flag: --description" + +# Default repo to mobilyze-llc/{product} when not provided +if [[ -z "$REPO" ]]; then + REPO="mobilyze-llc/$PRODUCT" +fi + +# These will be populated by steps 3 and 4 +PROJECT_SLUG="" +PORT="" + +# --- Main --- + +if $DRY_RUN; then + info "symphony-onboard [DRY RUN]" +else + info "symphony-onboard" +fi +info " product: $PRODUCT" +info " team-key: $TEAM_KEY" +info " team-name: $TEAM_NAME" +info " description: $DESCRIPTION" +info " repo: $REPO" +echo "" + +# Create temp directory for intermediate files +TMPDIR_ONBOARD="$(mktemp -d)" + +# --- Preconditions --- + +info "=== Precondition checks ===" +check_gh_auth +ok "GitHub CLI authenticated" + +check_linear_key +ok "LINEAR_API_KEY is set" + +check_jq +ok "jq is available" + +check_templates +ok "All templates found" + +check_ports_file +ok "ports.json found" + +check_run_pipeline +ok "run-pipeline.sh found" + +check_linear_cli +ok "linear CLI available" + +echo "" + +# --- Step 1: 
Duplicate detection --- + +info "=== Step 1: Duplicate detection ===" + +EXISTING_PORT="$(jq -r --arg p "$PRODUCT" '.[$p] // empty' "$PORTS_FILE")" +if [[ -n "$EXISTING_PORT" ]]; then + die "Product '$PRODUCT' already exists in ports.json (port: $EXISTING_PORT). Aborting to prevent duplicate onboarding." +fi +ok "Product '$PRODUCT' not found in ports.json — proceeding" + +echo "" + +# --- Step 2: Linear team creation (idempotent) --- + +info "=== Step 2: Linear team creation ===" + +if $DRY_RUN; then + info "[dry-run] Would check for existing Linear team with key '$TEAM_KEY'" + info "[dry-run] Would create team '$TEAM_NAME' (key: $TEAM_KEY) if not found" + TEAM_ID="dry-run-team-id" +else + # Check if team already exists + TEAM_QUERY='query { teams(filter: { key: { eq: "'"$TEAM_KEY"'" } }) { nodes { id name key } } }' + TEAM_RESULT="$(linear api "$TEAM_QUERY" 2>/dev/null || true)" + + if [[ -z "$TEAM_RESULT" ]]; then + die "Failed to query Linear API for teams. Check LINEAR_API_KEY and network connectivity." + fi + + TEAM_COUNT="$(echo "$TEAM_RESULT" | jq -r '.data.teams.nodes | length' 2>/dev/null || echo "0")" + + if [[ "$TEAM_COUNT" -gt 0 ]]; then + TEAM_ID="$(echo "$TEAM_RESULT" | jq -r '.data.teams.nodes[0].id')" + TEAM_EXISTING_NAME="$(echo "$TEAM_RESULT" | jq -r '.data.teams.nodes[0].name')" + ok "Linear team already exists: $TEAM_EXISTING_NAME (key: $TEAM_KEY, id: $TEAM_ID) — skipping" + else + # Create team + CREATE_TEAM_QUERY='mutation { teamCreate(input: { name: "'"$TEAM_NAME"'", key: "'"$TEAM_KEY"'", description: "'"$DESCRIPTION"'" }) { success team { id name key } } }' + CREATE_TEAM_RESULT="$(linear api "$CREATE_TEAM_QUERY" 2>/dev/null || true)" + + if [[ -z "$CREATE_TEAM_RESULT" ]]; then + die "Failed to create Linear team. Check LINEAR_API_KEY and network connectivity." 
+ fi + + CREATE_SUCCESS="$(echo "$CREATE_TEAM_RESULT" | jq -r '.data.teamCreate.success' 2>/dev/null || echo "false")" + if [[ "$CREATE_SUCCESS" != "true" ]]; then + die "Linear team creation failed. Response: $CREATE_TEAM_RESULT" + fi + + TEAM_ID="$(echo "$CREATE_TEAM_RESULT" | jq -r '.data.teamCreate.team.id')" + ok "Linear team created: $TEAM_NAME (key: $TEAM_KEY, id: $TEAM_ID)" + fi +fi + +echo "" + +# --- Step 3: Linear project creation + team linking (idempotent) --- + +info "=== Step 3: Linear project creation + team linking ===" + +if $DRY_RUN; then + info "[dry-run] Would check for existing Linear project named '$PRODUCT'" + info "[dry-run] Would create project and link to team '$TEAM_KEY' if not found" + PROJECT_SLUG="dry-run-slug" +else + # Check if project already exists by name + PROJECT_QUERY='query { projects(filter: { name: { eq: "'"$PRODUCT"'" } }) { nodes { id name slugId teams { nodes { id key } } } } }' + PROJECT_RESULT="$(linear api "$PROJECT_QUERY" 2>/dev/null || true)" + + if [[ -z "$PROJECT_RESULT" ]]; then + die "Failed to query Linear API for projects. Check LINEAR_API_KEY and network connectivity." + fi + + PROJECT_COUNT="$(echo "$PROJECT_RESULT" | jq -r '.data.projects.nodes | length' 2>/dev/null || echo "0")" + + if [[ "$PROJECT_COUNT" -gt 0 ]]; then + PROJECT_SLUG="$(echo "$PROJECT_RESULT" | jq -r '.data.projects.nodes[0].slugId')" + PROJECT_ID="$(echo "$PROJECT_RESULT" | jq -r '.data.projects.nodes[0].id')" + ok "Linear project already exists: $PRODUCT (slug: $PROJECT_SLUG) — skipping creation" + + # Check if team is already linked + LINKED_TEAM_COUNT="$(echo "$PROJECT_RESULT" | jq -r --arg tk "$TEAM_KEY" '[.data.projects.nodes[0].teams.nodes[] | select(.key == $tk)] | length' 2>/dev/null || echo "0")" + if [[ "$LINKED_TEAM_COUNT" -gt 0 ]]; then + ok "Team '$TEAM_KEY' already linked to project — skipping" + else + warn "Team '$TEAM_KEY' not linked to project '$PRODUCT'. Link it manually via the Linear UI." 
+ fi + else + # Create project with team link + CREATE_PROJECT_QUERY='mutation { projectCreate(input: { name: "'"$PRODUCT"'", description: "'"$DESCRIPTION"'", teamIds: ["'"$TEAM_ID"'"] }) { success project { id name slugId } } }' + CREATE_PROJECT_RESULT="$(linear api "$CREATE_PROJECT_QUERY" 2>/dev/null || true)" + + if [[ -z "$CREATE_PROJECT_RESULT" ]]; then + die "Failed to create Linear project. Check LINEAR_API_KEY and network connectivity." + fi + + CREATE_PROJECT_SUCCESS="$(echo "$CREATE_PROJECT_RESULT" | jq -r '.data.projectCreate.success' 2>/dev/null || echo "false")" + if [[ "$CREATE_PROJECT_SUCCESS" != "true" ]]; then + die "Linear project creation failed. Response: $CREATE_PROJECT_RESULT" + fi + + PROJECT_SLUG="$(echo "$CREATE_PROJECT_RESULT" | jq -r '.data.projectCreate.project.slugId')" + PROJECT_ID="$(echo "$CREATE_PROJECT_RESULT" | jq -r '.data.projectCreate.project.id')" + ok "Linear project created: $PRODUCT (slug: $PROJECT_SLUG, id: $PROJECT_ID)" + ok "Team '$TEAM_KEY' linked to project" + fi +fi + +echo "" + +# --- Step 4: Port auto-allocation --- + +info "=== Step 4: Port auto-allocation ===" + +if $DRY_RUN; then + MAX_PORT="$(jq '[to_entries[] | .value] | max' "$PORTS_FILE" 2>/dev/null || echo "4320")" + PORT=$((MAX_PORT + 1)) + info "[dry-run] Would allocate port $PORT (max existing: $MAX_PORT)" + info "[dry-run] Would write '$PRODUCT': $PORT to ports.json" +else + # Read max port and allocate next + MAX_PORT="$(jq '[to_entries[] | .value] | max' "$PORTS_FILE")" + PORT=$((MAX_PORT + 1)) + + # Write new entry to ports.json + jq --arg p "$PRODUCT" --argjson port "$PORT" '. 
+ {($p): $port}' "$PORTS_FILE" > "$TMPDIR_ONBOARD/ports.json" + cp "$TMPDIR_ONBOARD/ports.json" "$PORTS_FILE" + ok "Port $PORT allocated for '$PRODUCT' (written to ports.json)" +fi + +echo "" + +# --- Step 5: run-pipeline.sh auto-registration --- + +info "=== Step 5: run-pipeline.sh auto-registration ===" + +# Derive workflow filename and default repo URL +WORKFLOW_NAME="$(echo "$PRODUCT" | tr '[:upper:]' '[:lower:]' | tr ' ' '-')" +REPO_URL="https://github.com/$REPO.git" + +if $DRY_RUN; then + info "[dry-run] Would insert case entry for '$PRODUCT' in run-pipeline.sh before *) catch-all" + info "[dry-run] WORKFLOW=pipeline-config/workflows/WORKFLOW-${WORKFLOW_NAME}.md" + info "[dry-run] DEFAULT_REPO_URL=$REPO_URL" +else + # Check if product is already registered + if grep -q "^ ${PRODUCT})" "$RUN_PIPELINE" 2>/dev/null; then + ok "Product '$PRODUCT' already registered in run-pipeline.sh — skipping" + else + # Insert new case entry before the *) catch-all + CASE_BLOCK=" ${PRODUCT})\\ + WORKFLOW=\"pipeline-config/workflows/WORKFLOW-${WORKFLOW_NAME}.md\"\\ + DEFAULT_REPO_URL=\"${REPO_URL}\"\\ + ;;" + sed -i'' -e "/^ \*)$/i\\ +${CASE_BLOCK}" "$RUN_PIPELINE" + ok "Product '$PRODUCT' registered in run-pipeline.sh" + fi +fi + +echo "" + +# --- Step 6: Generate WORKFLOW file --- + +info "=== Step 6: Generate WORKFLOW file ===" + +WORKFLOW_FILE="$WORKFLOW_DIR/WORKFLOW-${WORKFLOW_NAME}.md" + +if [[ -f "$WORKFLOW_FILE" ]]; then + ok "WORKFLOW file already exists at $WORKFLOW_FILE — skipping" +else + info "Generating $WORKFLOW_FILE..." 
+ if $DRY_RUN; then + info "[dry-run] Would copy WORKFLOW-template.md → WORKFLOW-${WORKFLOW_NAME}.md" + info "[dry-run] Would substitute project_slug: $PROJECT_SLUG" + info "[dry-run] Would substitute port: $PORT" + else + cp "$TEMPLATE_DIR/WORKFLOW-template.md" "$WORKFLOW_FILE" + # Substitute project_slug (only the placeholder value, not the key) + sed -i'' -e "s|project_slug: |project_slug: $PROJECT_SLUG|" "$WORKFLOW_FILE" + # Substitute port (only the default value under server:) + sed -i'' -e "s|port: 4321|port: $PORT|" "$WORKFLOW_FILE" + ok "WORKFLOW file created at $WORKFLOW_FILE" + fi +fi + +echo "" + +# --- Step 7: Verify repo exists --- + +info "=== Step 7: Verify repo exists ===" + +if ! $DRY_RUN; then + check_repo_exists "$REPO" + ok "Repository '$REPO' is accessible" +else + info "[dry-run] Would verify repository '$REPO' exists" +fi + +echo "" + +# --- Step 8: Generate CLAUDE.md in target repo --- + +info "=== Step 8: Generate CLAUDE.md in target repo ===" + +if $DRY_RUN; then + info "[dry-run] Would check if CLAUDE.md exists in $REPO" + info "[dry-run] Would generate CLAUDE.md from template with substitutions:" + info "[dry-run] {{PROJECT_NAME}} → $PRODUCT" + info "[dry-run] {{PORT}} → $PORT" + info "[dry-run] {{REPO_URL}} → https://github.com/$REPO" + info "[dry-run] Would upload CLAUDE.md to $REPO via GitHub API" +else + # Check if CLAUDE.md already exists in the repo + EXISTING_CLAUDE="$(gh api "/repos/$REPO/contents/CLAUDE.md" --jq '.sha' 2>/dev/null || echo "")" + + if [[ -n "$EXISTING_CLAUDE" ]]; then + ok "CLAUDE.md already exists in $REPO — skipping" + else + # Generate CLAUDE.md from template + CLAUDE_MD_TMP="$TMPDIR_ONBOARD/CLAUDE.md" + sed \ + -e "s|{{PROJECT_NAME}}|$PRODUCT|g" \ + -e "s|{{PORT}}|$PORT|g" \ + -e "s|{{REPO_URL}}|https://github.com/$REPO|g" \ + "$TEMPLATE_DIR/CLAUDE.md.tmpl" > "$CLAUDE_MD_TMP" + + # Upload via GitHub API (create file) + CONTENT_B64="$(base64 < "$CLAUDE_MD_TMP")" + gh api "/repos/$REPO/contents/CLAUDE.md" \ + 
--method PUT \ + --field message="Add CLAUDE.md for Symphony pipeline" \ + --field content="$CONTENT_B64" \ + --silent >/dev/null 2>&1 + ok "CLAUDE.md uploaded to $REPO" + fi +fi + +echo "" + +# --- Step 9: Copy CI minimal workflow to target repo --- + +info "=== Step 9: Copy CI minimal workflow to target repo ===" + +if $DRY_RUN; then + info "[dry-run] Would check if .github/workflows/ci.yml exists in $REPO" + info "[dry-run] Would upload ci-minimal.yml as .github/workflows/ci.yml to $REPO" +else + EXISTING_CI="$(gh api "/repos/$REPO/contents/.github/workflows/ci.yml" --jq '.sha' 2>/dev/null || echo "")" + + if [[ -n "$EXISTING_CI" ]]; then + ok "CI workflow already exists in $REPO — skipping" + else + CI_CONTENT_B64="$(base64 < "$TEMPLATE_DIR/ci-minimal.yml")" + gh api "/repos/$REPO/contents/.github/workflows/ci.yml" \ + --method PUT \ + --field message="Add minimal CI workflow for Symphony pipeline" \ + --field content="$CI_CONTENT_B64" \ + --silent >/dev/null 2>&1 + ok "CI workflow uploaded to $REPO" + fi +fi + +echo "" + +# --- Step 10: Set up GitHub merge queue via Rulesets API --- + +info "=== Step 10: Set up GitHub merge queue via Rulesets API ===" + +if $DRY_RUN; then + info "[dry-run] Would check existing rulesets for $REPO" + info "[dry-run] Would create merge queue ruleset if not present" +else + # Check if a merge queue ruleset already exists + EXISTING_RULESETS="$(gh api "/repos/$REPO/rulesets" 2>/dev/null || echo "[]")" + HAS_MERGE_QUEUE="$(echo "$EXISTING_RULESETS" | jq '[.[] | select(.name == "symphony-merge-queue")] | length' 2>/dev/null || echo "0")" + + if [[ "$HAS_MERGE_QUEUE" -gt 0 ]]; then + ok "Merge queue ruleset already exists in $REPO — skipping" + else + # Create ruleset JSON in temp file + RULESET_JSON="$TMPDIR_ONBOARD/ruleset.json" + cat > "$RULESET_JSON" <<'RULESET_EOF' +{ + "name": "symphony-merge-queue", + "target": "branch", + "enforcement": "active", + "conditions": { + "ref_name": { + "include": ["refs/heads/main"], + "exclude": 
[] + } + }, + "rules": [ + { + "type": "merge_queue", + "parameters": { + "check_response_timeout_minutes": 60, + "grouping_strategy": "ALLGREEN", + "max_entries_to_build": 5, + "max_entries_to_merge": 5, + "merge_method": "SQUASH", + "min_entries_to_merge": 1, + "min_entries_to_merge_wait_minutes": 5 + } + }, + { + "type": "required_status_checks", + "parameters": { + "required_status_checks": [ + { + "context": "test" + } + ], + "strict_required_status_checks_policy": false + } + } + ], + "bypass_actors": [] +} +RULESET_EOF + + gh api "/repos/$REPO/rulesets" \ + --method POST \ + --input "$RULESET_JSON" \ + --silent >/dev/null 2>&1 + ok "Merge queue ruleset created in $REPO" + fi +fi + +echo "" + +# --- Step 11: Summary --- + +info "=== Step 11: Summary ===" +info " product: $PRODUCT" +info " team: $TEAM_NAME (key: $TEAM_KEY)" +info " project-slug: $PROJECT_SLUG" +info " port: $PORT" +info " repo: $REPO" +info " WORKFLOW: $WORKFLOW_FILE" +info " CLAUDE.md: $REPO/CLAUDE.md" +info " CI workflow: $REPO/.github/workflows/ci.yml" +info " Merge queue: $REPO (symphony-merge-queue ruleset)" +info " run-pipeline: $RUN_PIPELINE (case entry for $PRODUCT)" + +if $DRY_RUN; then + echo "" + info "Dry run complete — no changes were made." 
+fi + +ok "Onboarding complete for $PRODUCT" diff --git a/ops/token-report.mjs b/ops/token-report.mjs new file mode 100755 index 00000000..9e57eecf --- /dev/null +++ b/ops/token-report.mjs @@ -0,0 +1,2182 @@ +#!/usr/bin/env node +/** + * token-report.mjs — Token history extraction, analysis, HTML reports, Slack digest, log rotation + * + * Subcommands: + * extract — Parse symphony.jsonl logs, extract stage_completed events, + * enrich with Linear issue titles, append to token-history.jsonl (SYMPH-129) + * analyze — Compute efficiency metrics, trends, outliers from token-history.jsonl (SYMPH-130) + * render — Generate self-contained HTML report with inline SVG charts (SYMPH-131) + * slack — Post ≤15-line digest to $SLACK_WEBHOOK_URL (SYMPH-131) + * rotate — Compress/delete old logs and reports (SYMPH-131) + * + * Environment: + * SYMPHONY_HOME (default $HOME/.symphony) + * SYMPHONY_LOG_DIR (default $HOME/Library/Logs/symphony) + * LINEAR_API_KEY — used by `linear` CLI; graceful degradation without it + * SLACK_WEBHOOK_URL — Slack incoming webhook; graceful degradation without it + * BASE_URL — hostname:port for report links (never hardcode localhost) + * TOKEN_REPORT_PORT — port for report server (default 8090) + * + * SYMPH-129, SYMPH-130, SYMPH-131 + */ + +import { execFileSync } from "node:child_process"; +import { createHash } from "node:crypto"; +import { + appendFileSync, + closeSync, + existsSync, + mkdirSync, + openSync, + readFileSync, + readSync, + readdirSync, + statSync, + writeFileSync, +} from "node:fs"; +import { homedir } from "node:os"; +import { basename, join, resolve } from "node:path"; + +// --------------------------------------------------------------------------- +// Config +// --------------------------------------------------------------------------- + +const SYMPHONY_HOME = process.env.SYMPHONY_HOME || join(homedir(), ".symphony"); +const SYMPHONY_LOG_DIR = + process.env.SYMPHONY_LOG_DIR || + join(homedir(), "Library", "Logs", "symphony"); 
+ +const DATA_DIR = join(SYMPHONY_HOME, "data"); +const HWM_DIR = join(DATA_DIR, ".hwm"); +const LINEAR_CACHE_DIR = join(DATA_DIR, "linear-cache"); +const TOKEN_HISTORY_PATH = join(DATA_DIR, "token-history.jsonl"); +const CONFIG_HISTORY_PATH = join(DATA_DIR, "config-history.jsonl"); + +// --------------------------------------------------------------------------- +// Helpers +// --------------------------------------------------------------------------- + +function warn(msg) { + process.stderr.write(`WARN: ${msg}\n`); +} + +function info(msg) { + process.stderr.write(`INFO: ${msg}\n`); +} + +/** + * Compute a safe filename key for an HWM file from an absolute log path. + */ +function hwmKeyForPath(logPath) { + return createHash("sha256").update(logPath).digest("hex").slice(0, 16); +} + +/** + * Read HWM state for a log file. Returns { inode, offset }. + */ +function readHwm(logPath) { + const hwmFile = join(HWM_DIR, `${hwmKeyForPath(logPath)}.json`); + if (!existsSync(hwmFile)) return { inode: 0, offset: 0 }; + try { + return JSON.parse(readFileSync(hwmFile, "utf-8")); + } catch { + return { inode: 0, offset: 0 }; + } +} + +/** + * Write HWM state for a log file. + */ +function writeHwm(logPath, state) { + const hwmFile = join(HWM_DIR, `${hwmKeyForPath(logPath)}.json`); + writeFileSync(hwmFile, `${JSON.stringify(state)}\n`); +} + +/** + * Get inode of a file (cross-platform). + */ +function getInode(filePath) { + try { + return statSync(filePath).ino; + } catch { + return 0; + } +} + +/** + * Get file size. + */ +function getFileSize(filePath) { + try { + return statSync(filePath).size; + } catch { + return 0; + } +} + +// --------------------------------------------------------------------------- +// Log reading — inode-aware, truncation-aware, partial-line-safe +// --------------------------------------------------------------------------- + +/** + * Read new complete lines from a log file starting from the HWM. 
+ * Returns { lines: string[], newOffset: number, newInode: number }. + */ +function readNewLines(logPath) { + const hwm = readHwm(logPath); + const currentInode = getInode(logPath); + const currentSize = getFileSize(logPath); + + if (currentSize === 0) { + return { lines: [], newOffset: 0, newInode: currentInode }; + } + + let startOffset = hwm.offset; + + // Inode change → log rotation → reset to beginning + if (currentInode !== hwm.inode && hwm.inode !== 0) { + info( + `Inode changed for ${logPath} (${hwm.inode} → ${currentInode}), resetting HWM`, + ); + startOffset = 0; + } + + // File truncated → reset to beginning + if (currentSize < startOffset) { + info( + `File truncated for ${logPath} (size ${currentSize} < offset ${startOffset}), resetting HWM`, + ); + startOffset = 0; + } + + // Nothing new to read + if (startOffset >= currentSize) { + return { lines: [], newOffset: startOffset, newInode: currentInode }; + } + + // Read the new bytes + const bytesToRead = currentSize - startOffset; + const buf = Buffer.alloc(bytesToRead); + const fd = openSync(logPath, "r"); + try { + readSync(fd, buf, 0, bytesToRead, startOffset); + } finally { + closeSync(fd); + } + + const raw = buf.toString("utf-8"); + + // Find last newline — everything after it is a partial line to discard + const lastNewline = raw.lastIndexOf("\n"); + if (lastNewline === -1) { + // No complete line at all — keep offset where it was + return { lines: [], newOffset: startOffset, newInode: currentInode }; + } + + const completeText = raw.slice(0, lastNewline); + const lines = completeText.split("\n").filter((l) => l.trim().length > 0); + const newOffset = startOffset + lastNewline + 1; + + return { lines, newOffset, newInode: currentInode }; +} + +// --------------------------------------------------------------------------- +// Linear CLI integration +// --------------------------------------------------------------------------- + +let linearAvailable = null; // tri-state: null=unknown, true, false 
+ +function checkLinearAvailable() { + if (linearAvailable !== null) return linearAvailable; + if (!process.env.LINEAR_API_KEY) { + warn("LINEAR_API_KEY not set — issue titles will be null"); + linearAvailable = false; + return false; + } + try { + execFileSync("which", ["linear"], { stdio: "pipe" }); + linearAvailable = true; + } catch { + warn("linear CLI not found in PATH — issue titles will be null"); + linearAvailable = false; + } + return linearAvailable; +} + +/** + * Look up a Linear issue title, with filesystem cache. + * Returns the title string or null. + */ +function getLinearTitle(issueIdentifier) { + if (!issueIdentifier) return null; + + // Check cache first + const cacheFile = join(LINEAR_CACHE_DIR, `${issueIdentifier}.json`); + if (existsSync(cacheFile)) { + try { + const cached = JSON.parse(readFileSync(cacheFile, "utf-8")); + return cached.title ?? null; + } catch { + // Cache corrupt — refetch + } + } + + if (!checkLinearAvailable()) return null; + + try { + const out = execFileSync( + "linear", + ["issue", "view", issueIdentifier, "--json", "--no-pager"], + { stdio: ["pipe", "pipe", "pipe"], timeout: 15000, encoding: "utf-8" }, + ); + const data = JSON.parse(out); + writeFileSync(cacheFile, `${JSON.stringify(data, null, 2)}\n`); + return data.title ?? 
null; + } catch (err) { + warn(`Failed to fetch Linear title for ${issueIdentifier}: ${err.message}`); + return null; + } +} + +// --------------------------------------------------------------------------- +// Extract subcommand +// --------------------------------------------------------------------------- + +function discoverProducts() { + if (!existsSync(SYMPHONY_LOG_DIR)) return []; + const entries = readdirSync(SYMPHONY_LOG_DIR, { withFileTypes: true }); + return entries + .filter((e) => e.isDirectory()) + .map((e) => ({ + product: e.name, + logPath: join(SYMPHONY_LOG_DIR, e.name, "symphony.jsonl"), + })) + .filter(({ logPath }) => existsSync(logPath)); +} + +/** + * Parse a stage_completed event into a token-history record. + */ +function mapEvent(event, product) { + return { + timestamp: event.timestamp ?? new Date().toISOString(), + product, + issue_id: event.issue_id ?? null, + issue_identifier: event.issue_identifier ?? null, + issue_title: null, // Enriched later + session_id: event.session_id ?? null, + stage_name: event.stage_name ?? null, + outcome: event.outcome ?? null, + total_input_tokens: event.total_input_tokens ?? 0, + total_output_tokens: event.total_output_tokens ?? 0, + total_total_tokens: event.total_total_tokens ?? 0, + no_cache_tokens: event.no_cache_tokens ?? 0, + total_cache_read_tokens: event.total_cache_read_tokens ?? 0, + total_cache_write_tokens: event.total_cache_write_tokens ?? 0, + input_tokens: event.input_tokens ?? 0, + output_tokens: event.output_tokens ?? 0, + total_tokens: event.total_tokens ?? 0, + cache_read_tokens: event.cache_read_tokens ?? 0, + cache_write_tokens: event.cache_write_tokens ?? 0, + reasoning_tokens: event.reasoning_tokens ?? 0, + turns_used: event.turns_used ?? event.turn_count ?? 0, + duration_ms: event.duration_ms ?? 
0, + extracted_at: new Date().toISOString(), + }; +} + +function runExtract() { + const products = discoverProducts(); + if (products.length === 0) { + info("No product log directories found"); + } + + let totalExtracted = 0; + let totalSkipped = 0; + const seenIdentifiers = new Set(); + + for (const { product, logPath } of products) { + const fileSize = getFileSize(logPath); + if (fileSize === 0) { + info(`Skipping empty log file: ${logPath}`); + continue; + } + + const { lines, newOffset, newInode } = readNewLines(logPath); + + if (lines.length === 0) { + writeHwm(logPath, { inode: newInode, offset: newOffset }); + continue; + } + + const records = []; + for (const line of lines) { + let event; + try { + event = JSON.parse(line); + } catch { + warn(`Malformed JSONL line in ${logPath}: ${line.slice(0, 100)}`); + totalSkipped++; + continue; + } + + if (event.event !== "stage_completed") continue; + // Accept both completed and failed outcomes + if (event.outcome !== "completed" && event.outcome !== "failed") continue; + + const record = mapEvent(event, product); + if (record.issue_identifier) { + seenIdentifiers.add(record.issue_identifier); + } + records.push(record); + } + + // Enrich with Linear titles (one CLI call per unique identifier) + const titleCache = new Map(); + for (const id of seenIdentifiers) { + if (!titleCache.has(id)) { + titleCache.set(id, getLinearTitle(id)); + } + } + for (const record of records) { + if (record.issue_identifier && titleCache.has(record.issue_identifier)) { + record.issue_title = titleCache.get(record.issue_identifier); + } + } + + // Append to token-history.jsonl + if (records.length > 0) { + const jsonlData = `${records.map((r) => JSON.stringify(r)).join("\n")}\n`; + appendFileSync(TOKEN_HISTORY_PATH, jsonlData); + totalExtracted += records.length; + } + + // Update HWM + writeHwm(logPath, { inode: newInode, offset: newOffset }); + } + + // Snapshot config hashes + snapshotConfigHashes(); + + info( + `Extraction complete: 
${totalExtracted} records extracted, ${totalSkipped} lines skipped`, + ); +} + +// --------------------------------------------------------------------------- +// Config hash snapshot +// --------------------------------------------------------------------------- + +function snapshotConfigHashes() { + const scriptDir = resolve(new URL(".", import.meta.url).pathname); + const symphonyRoot = resolve(scriptDir, ".."); + + const configFiles = []; + // Gather known config-ish files + const candidates = [ + "pipeline-config", + "biome.json", + "tsconfig.json", + "tsconfig.build.json", + "vitest.config.ts", + "package.json", + ]; + + for (const candidate of candidates) { + const fullPath = join(symphonyRoot, candidate); + if (!existsSync(fullPath)) continue; + const stat = statSync(fullPath); + if (stat.isDirectory()) { + gatherFiles(fullPath, configFiles); + } else { + configFiles.push(fullPath); + } + } + + // Also gather SKILL.md files from pipeline-config and any subdirectories + const skillFiles = []; + gatherFilesByPattern(symphonyRoot, "SKILL.md", skillFiles); + + const hashes = {}; + for (const file of [...configFiles, ...skillFiles]) { + try { + const relPath = file.replace(`${symphonyRoot}/`, ""); + const content = readFileSync(file); + hashes[relPath] = createHash("sha256") + .update(content) + .digest("hex") + .slice(0, 16); + } catch { + // Skip unreadable files + } + } + + const snapshot = { + timestamp: new Date().toISOString(), + config_hashes: hashes, + file_count: Object.keys(hashes).length, + }; + + appendFileSync(CONFIG_HISTORY_PATH, `${JSON.stringify(snapshot)}\n`); +} + +function gatherFiles(dir, out) { + try { + const entries = readdirSync(dir, { withFileTypes: true }); + for (const entry of entries) { + const fullPath = join(dir, entry.name); + if (entry.isDirectory()) { + if ( + entry.name === "node_modules" || + entry.name === ".git" || + entry.name === "dist" + ) + continue; + gatherFiles(fullPath, out); + } else { + out.push(fullPath); + } + } 
+ } catch { + // Skip unreadable directories + } +} + +function gatherFilesByPattern(dir, pattern, out) { + try { + const entries = readdirSync(dir, { withFileTypes: true }); + for (const entry of entries) { + const fullPath = join(dir, entry.name); + if (entry.isDirectory()) { + if ( + entry.name === "node_modules" || + entry.name === ".git" || + entry.name === "dist" + ) + continue; + gatherFilesByPattern(fullPath, pattern, out); + } else if (entry.name === pattern) { + out.push(fullPath); + } + } + } catch { + // Skip unreadable directories + } +} + +// --------------------------------------------------------------------------- +// Analyze subcommand — SYMPH-130 +// --------------------------------------------------------------------------- + +/** + * Read all records from a JSONL file. Returns [] if file missing/empty. + */ +function readJsonl(filePath) { + if (!existsSync(filePath)) return []; + const content = readFileSync(filePath, "utf-8").trim(); + if (!content) return []; + return content + .split("\n") + .filter(Boolean) + .map((line) => { + try { + return JSON.parse(line); + } catch { + return null; + } + }) + .filter(Boolean); +} + +/** + * Compute median of a numeric array. Returns 0 for empty arrays. + */ +function median(arr) { + if (arr.length === 0) return 0; + const sorted = [...arr].sort((a, b) => a - b); + const mid = Math.floor(sorted.length / 2); + return sorted.length % 2 === 0 + ? (sorted[mid - 1] + sorted[mid]) / 2 + : sorted[mid]; +} + +/** + * Compute mean of a numeric array. Returns 0 for empty arrays. + */ +function mean(arr) { + if (arr.length === 0) return 0; + return arr.reduce((s, v) => s + v, 0) / arr.length; +} + +/** + * Compute standard deviation. + */ +function stddev(arr) { + if (arr.length < 2) return 0; + const m = mean(arr); + const variance = arr.reduce((s, v) => s + (v - m) ** 2, 0) / arr.length; + return Math.sqrt(variance); +} + +/** + * Round to specified decimal places. 
+ */ +function round(val, decimals = 1) { + const factor = 10 ** decimals; + return Math.round(val * factor) / factor; +} + +/** + * Parse timestamp to Date object. + */ +function parseTs(ts) { + return new Date(ts); +} + +/** + * Get days ago boundary from a reference date. + */ +function daysAgo(days, refDate = new Date()) { + const d = new Date(refDate); + d.setDate(d.getDate() - days); + d.setHours(0, 0, 0, 0); + return d; +} + +/** + * Get the date string (YYYY-MM-DD) from a Date. + */ +function dateKey(d) { + return d.toISOString().slice(0, 10); +} + +/** + * Filter records to a date window (>= start, < end). + */ +function filterByDateRange(records, startDate, endDate) { + const start = startDate.getTime(); + const end = endDate.getTime(); + return records.filter((r) => { + const t = parseTs(r.timestamp).getTime(); + return t >= start && t < end; + }); +} + +/** + * Compute data span in days. + */ +function dataSpanDays(records) { + if (records.length === 0) return 0; + const timestamps = records.map((r) => parseTs(r.timestamp).getTime()); + const minT = Math.min(...timestamps); + const maxT = Math.max(...timestamps); + return Math.ceil((maxT - minT) / (1000 * 60 * 60 * 24)); +} + +/** + * Determine cold-start tier: "<7d", "7-29d", ">=30d". + */ +function coldStartTier(spanDays) { + if (spanDays < 7) return "<7d"; + if (spanDays < 30) return "7-29d"; + return ">=30d"; +} + +/** + * Compute efficiency scorecard metrics for a set of records. + * Failed stages are excluded from efficiency metrics but included in spend. + */ +function computeEfficiencyScorecard(records) { + const completed = records.filter((r) => r.outcome === "completed"); + + let totalInput = 0; + let totalOutput = 0; + let totalTotal = 0; + let totalCacheRead = 0; + let totalNoCache = 0; + let totalTurns = 0; + + for (const r of completed) { + totalInput += r.total_input_tokens ?? 0; + totalOutput += r.total_output_tokens ?? 0; + totalTotal += r.total_total_tokens ?? 
0; + totalCacheRead += r.total_cache_read_tokens ?? 0; + totalNoCache += r.no_cache_tokens ?? 0; + totalTurns += r.turns_used ?? 0; + } + + // Cache efficiency: cache_read / (input + cache_read) * 100 + const inputPlusCacheRead = totalInput + totalCacheRead; + const cacheEfficiency = + inputPlusCacheRead > 0 ? (totalCacheRead / inputPlusCacheRead) * 100 : 0; + + // Output ratio: output / total * 100 + const outputRatio = totalTotal > 0 ? (totalOutput / totalTotal) * 100 : 0; + + // Wasted context: no_cache / input * 100 + const wastedContext = totalInput > 0 ? (totalNoCache / totalInput) * 100 : 0; + + // Tokens per turn + const tokensPerTurn = totalTurns > 0 ? totalTotal / totalTurns : 0; + + // First-pass rate: 1 - (issues with >1 implement completed) / (total unique issues) + const issueImplementCounts = {}; + for (const r of records) { + if ( + r.stage_name === "implement" && + r.outcome === "completed" && + r.issue_identifier + ) { + issueImplementCounts[r.issue_identifier] = + (issueImplementCounts[r.issue_identifier] ?? 0) + 1; + } + } + const totalUniqueIssues = Object.keys(issueImplementCounts).length; + const reworkIssues = Object.values(issueImplementCounts).filter( + (c) => c > 1, + ).length; + const firstPassRate = + totalUniqueIssues > 0 ? (1 - reworkIssues / totalUniqueIssues) * 100 : 100; + + // Failure rate per stage type + const stageTotal = {}; + const stageFailed = {}; + for (const r of records) { + if (!r.stage_name) continue; + stageTotal[r.stage_name] = (stageTotal[r.stage_name] ?? 0) + 1; + if (r.outcome === "failed") { + stageFailed[r.stage_name] = (stageFailed[r.stage_name] ?? 0) + 1; + } + } + const failureRate = {}; + for (const stage of Object.keys(stageTotal)) { + failureRate[stage] = round( + ((stageFailed[stage] ?? 
0) / stageTotal[stage]) * 100, + 1, + ); + } + + return { + cache_efficiency: round(cacheEfficiency, 1), + output_ratio: round(outputRatio, 1), + wasted_context: round(wastedContext, 1), + tokens_per_turn: round(tokensPerTurn, 0), + first_pass_rate: round(firstPassRate, 1), + failure_rate: failureRate, + }; +} + +/** + * Compute efficiency scorecard with trends (current, 7d, 30d). + */ +function computeScorecardWithTrends(records, now) { + const d7 = daysAgo(7, now); + const d30 = daysAgo(30, now); + + const currentScorecard = computeEfficiencyScorecard(records); + const last7Records = filterByDateRange(records, d7, now); + const last30Records = filterByDateRange(records, d30, now); + const scorecard7 = computeEfficiencyScorecard(last7Records); + const scorecard30 = computeEfficiencyScorecard(last30Records); + + const result = {}; + for (const key of [ + "cache_efficiency", + "output_ratio", + "wasted_context", + "tokens_per_turn", + "first_pass_rate", + ]) { + result[key] = { + current: currentScorecard[key], + trend_7d: scorecard7[key], + trend_30d: scorecard30[key], + }; + } + + // failure_rate has nested structure (per stage) + result.failure_rate = { + current: currentScorecard.failure_rate, + trend_7d: scorecard7.failure_rate, + trend_30d: scorecard30.failure_rate, + }; + + return result; +} + +/** + * Compute WoW delta. Returns null if insufficient data. 
+ * wow_delta_pct = (current_week - prior_week) / prior_week * 100 + */ +function computeWowDelta(records, metricFn, now) { + const d7 = daysAgo(7, now); + const d14 = daysAgo(14, now); + + const currentWeekRecords = filterByDateRange(records, d7, now); + const priorWeekRecords = filterByDateRange(records, d14, d7); + + if (currentWeekRecords.length === 0 || priorWeekRecords.length === 0) { + return null; + } + + const currentVal = metricFn(currentWeekRecords); + const priorVal = metricFn(priorWeekRecords); + + if (priorVal === 0) return null; + return round(((currentVal - priorVal) / priorVal) * 100, 1); +} + +/** + * Build executive summary with WoW deltas. + */ +function buildExecutiveSummary(records, spanDays, now) { + const hasWow = spanDays >= 14; + + const totalTokens = records.reduce( + (s, r) => s + (r.total_total_tokens ?? 0), + 0, + ); + const totalStages = records.length; + + const summary = { + total_tokens: { value: totalTokens }, + total_stages: { value: totalStages }, + unique_issues: { + value: new Set(records.map((r) => r.issue_identifier).filter(Boolean)) + .size, + }, + data_span_days: spanDays, + }; + + if (hasWow) { + const tokenFn = (recs) => + recs.reduce((s, r) => s + (r.total_total_tokens ?? 0), 0); + const stageFn = (recs) => recs.length; + + summary.total_tokens.wow_delta_pct = computeWowDelta(records, tokenFn, now); + summary.total_stages.wow_delta_pct = computeWowDelta(records, stageFn, now); + } + + return summary; +} + +/** + * Detect config-change markers from config-history.jsonl. + */ +function computeConfigChangeMarkers(configRecords) { + if (configRecords.length < 2) return []; + const markers = []; + for (let i = 1; i < configRecords.length; i++) { + const prev = configRecords[i - 1]; + const curr = configRecords[i]; + const prevHashes = prev.config_hashes ?? {}; + const currHashes = curr.config_hashes ?? 
{}; + + const changedFiles = []; + const allKeys = new Set([ + ...Object.keys(prevHashes), + ...Object.keys(currHashes), + ]); + for (const key of allKeys) { + if (prevHashes[key] !== currHashes[key]) { + changedFiles.push(key); + } + } + if (changedFiles.length > 0) { + markers.push({ + date: dateKey(parseTs(curr.timestamp)), + timestamp: curr.timestamp, + changed_files: changedFiles, + }); + } + } + return markers; +} + +/** + * Compute per-stage utilization trend: daily avg tokens per stage. + */ +function computePerStageTrend(records, configRecords) { + const stageNames = [ + ...new Set(records.map((r) => r.stage_name).filter(Boolean)), + ]; + const trend = {}; + + for (const stage of stageNames) { + const stageRecords = records.filter((r) => r.stage_name === stage); + // Group by date + const byDate = {}; + for (const r of stageRecords) { + const dk = dateKey(parseTs(r.timestamp)); + if (!byDate[dk]) byDate[dk] = []; + byDate[dk].push(r.total_total_tokens ?? 0); + } + const dailyAvg = {}; + for (const [date, vals] of Object.entries(byDate)) { + dailyAvg[date] = round(mean(vals), 0); + } + trend[stage] = { daily_avg: dailyAvg }; + } + + // Add config-change markers at dates where config hashes changed + const configMarkers = computeConfigChangeMarkers(configRecords); + if (configMarkers.length > 0) { + for (const stage of stageNames) { + trend[stage].config_changes = configMarkers; + } + } + + return trend; +} + +/** + * Compute per-ticket cost trend: rolling median and mean of total tokens per ticket. + */ +function computePerTicketTrend(records) { + const issueTokens = {}; + for (const r of records) { + const id = r.issue_identifier; + if (!id) continue; + issueTokens[id] = (issueTokens[id] ?? 0) + (r.total_total_tokens ?? 0); + } + const values = Object.values(issueTokens); + return { + median: round(median(values), 0), + mean: round(mean(values), 0), + ticket_count: values.length, + }; +} + +/** + * Compute per-product breakdown. 
+ */ +function computePerProduct(records) { + const byProduct = {}; + for (const r of records) { + const p = r.product ?? "unknown"; + if (!byProduct[p]) { + byProduct[p] = { + total_tokens: 0, + total_stages: 0, + unique_issues: new Set(), + }; + } + byProduct[p].total_tokens += r.total_total_tokens ?? 0; + byProduct[p].total_stages += 1; + if (r.issue_identifier) byProduct[p].unique_issues.add(r.issue_identifier); + } + const result = {}; + for (const [product, data] of Object.entries(byProduct)) { + result[product] = { + total_tokens: data.total_tokens, + total_stages: data.total_stages, + unique_issues: data.unique_issues.size, + }; + } + return result; +} + +/** + * Detect inflections: points where 7d avg crosses 30d avg by >15%. + * Requires >=20 samples in baseline (30d). + */ +function detectInflections(records, configRecords, now) { + const stageNames = [ + ...new Set(records.map((r) => r.stage_name).filter(Boolean)), + ]; + const inflections = []; + + for (const stage of stageNames) { + const stageRecords = records.filter((r) => r.stage_name === stage); + + const d7 = daysAgo(7, now); + const d30 = daysAgo(30, now); + const last30 = filterByDateRange(stageRecords, d30, now); + const last7 = filterByDateRange(stageRecords, d7, now); + + // Need >=20 samples in 30d baseline + if (last30.length < 20) continue; + + const avg30 = mean(last30.map((r) => r.total_total_tokens ?? 0)); + const avg7 = mean(last7.map((r) => r.total_total_tokens ?? 0)); + + if (avg30 === 0) continue; + const pctChange = ((avg7 - avg30) / avg30) * 100; + + if (Math.abs(pctChange) <= 15) continue; + + const direction = pctChange > 0 ? 
"increase" : "decrease"; + const attributions = []; + + // Ticket-mix attribution: analyze complexity distribution in +-48h window + const windowStart = new Date(d7.getTime() - 48 * 60 * 60 * 1000); + const windowEnd = new Date(d7.getTime() + 48 * 60 * 60 * 1000); + const windowRecords = filterByDateRange( + stageRecords, + windowStart, + windowEnd, + ); + const baselineRecords = filterByDateRange(stageRecords, d30, d7); + + if (windowRecords.length > 0 && baselineRecords.length > 0) { + const windowAvgTokens = mean( + windowRecords.map((r) => r.total_total_tokens ?? 0), + ); + const baselineAvgTokens = mean( + baselineRecords.map((r) => r.total_total_tokens ?? 0), + ); + const windowIssues = [ + ...new Set( + windowRecords.map((r) => r.issue_identifier).filter(Boolean), + ), + ]; + + attributions.push({ + type: "ticket_mix", + description: `coincides with ${windowIssues.length} tickets in window averaging ${round(windowAvgTokens, 0)} tokens vs baseline ${round(baselineAvgTokens, 0)}`, + window_issues: windowIssues, + window_avg_tokens: round(windowAvgTokens, 0), + baseline_avg_tokens: round(baselineAvgTokens, 0), + }); + } + + // Config-change correlation: check for hash changes within 2 days prior + const configChanges = computeConfigChangeMarkers(configRecords); + for (const change of configChanges) { + const changeDate = parseTs(change.timestamp); + const changeDaysAgo = + (d7.getTime() - changeDate.getTime()) / (1000 * 60 * 60 * 24); + if (changeDaysAgo >= 0 && changeDaysAgo <= 2) { + attributions.push({ + type: "config_change", + description: `coincides with config change on ${change.date}: ${change.changed_files.join(", ")}`, + date: change.date, + changed_files: change.changed_files, + }); + } + } + + inflections.push({ + stage, + direction, + pct_change: round(pctChange, 1), + avg_7d: round(avg7, 0), + avg_30d: round(avg30, 0), + attributions, + }); + } + + return inflections; +} + +/** + * Get Linear parent spec for an issue using the Linear CLI. 
+ * Caches results in linear-cache/{identifier}-parent.json. + */ +function getLinearParentSpec(issueId, issueIdentifier) { + if (!issueId && !issueIdentifier) return null; + + const cacheKey = issueIdentifier ?? issueId; + const cacheFile = join(LINEAR_CACHE_DIR, `${cacheKey}-parent.json`); + if (existsSync(cacheFile)) { + try { + return JSON.parse(readFileSync(cacheFile, "utf-8")); + } catch { + // Cache corrupt — refetch + } + } + + if (!checkLinearAvailable()) return null; + + try { + const query = `{ issue(id: "${issueId}") { parent { identifier title description } } }`; + const out = execFileSync("linear", ["api", query, "--silent"], { + stdio: ["pipe", "pipe", "pipe"], + timeout: 15000, + encoding: "utf-8", + }); + const data = JSON.parse(out); + const parent = data?.data?.issue?.parent ?? null; + writeFileSync(cacheFile, `${JSON.stringify(parent, null, 2)}\n`); + return parent; + } catch (err) { + warn(`Failed to fetch Linear parent for ${cacheKey}: ${err.message}`); + return null; + } +} + +/** + * Classify a parent spec's complexity based on description content. + */ +function classifyParentComplexity(parent) { + if (!parent || !parent.description) + return { classification: "UNKNOWN", task_count: 0 }; + const desc = parent.description; + const taskLines = desc + .split("\n") + .filter( + (l) => + /^\s*[-*]\s*\[/.test(l) || + /^\s*\d+[.)]\s/.test(l) || + /^\s*[-*]\s+\S/.test(l), + ); + const taskCount = taskLines.length; + let classification = "SIMPLE"; + if (taskCount >= 8) classification = "COMPLEX"; + else if (taskCount >= 4) classification = "MODERATE"; + return { classification, task_count: taskCount }; +} + +/** + * Detect outliers (>2σ) and generate hypotheses with Linear parent spec. + */ +function detectOutliers(records) { + const issueTokens = {}; + const issueMeta = {}; + for (const r of records) { + const id = r.issue_identifier; + if (!id) continue; + issueTokens[id] = (issueTokens[id] ?? 0) + (r.total_total_tokens ?? 
0); + if (!issueMeta[id]) { + issueMeta[id] = { + issue_id: r.issue_id, + issue_identifier: id, + issue_title: r.issue_title, + }; + } + } + + const values = Object.values(issueTokens); + if (values.length < 3) return []; + + const m = mean(values); + const sd = stddev(values); + if (sd === 0) return []; + + const threshold = m + 2 * sd; + const outliers = []; + + for (const [identifier, tokens] of Object.entries(issueTokens)) { + if (tokens > threshold) { + const meta = issueMeta[identifier]; + const zScore = round((tokens - m) / sd, 1); + + // Try to get parent spec from Linear for hypothesis + const parent = getLinearParentSpec(meta.issue_id, meta.issue_identifier); + const complexity = classifyParentComplexity(parent); + + const hypothesis = parent + ? `Parent spec "${parent.title}" (${parent.identifier}) classified ${complexity.classification} with ${complexity.task_count} tasks — high token usage may reflect spec complexity` + : "Linear parent spec unavailable — unable to determine complexity attribution"; + + outliers.push({ + issue_identifier: identifier, + issue_title: meta.issue_title, + total_tokens: tokens, + z_score: zScore, + threshold: round(threshold, 0), + mean: round(m, 0), + stddev: round(sd, 0), + parent: parent + ? { + identifier: parent.identifier, + title: parent.title, + complexity: complexity.classification, + task_count: complexity.task_count, + } + : null, + hypothesis, + }); + } + } + + outliers.sort((a, b) => b.total_tokens - a.total_tokens); + return outliers; +} + +/** + * Compute per-stage token spend (includes ALL stages, both completed and failed). + */ +function computePerStageSpend(records) { + const byStage = {}; + for (const r of records) { + const s = r.stage_name ?? "unknown"; + if (!byStage[s]) { + byStage[s] = { total_tokens: 0, count: 0, completed: 0, failed: 0 }; + } + byStage[s].total_tokens += r.total_total_tokens ?? 
0; + byStage[s].count += 1; + if (r.outcome === "completed") byStage[s].completed += 1; + if (r.outcome === "failed") byStage[s].failed += 1; + } + return byStage; +} + +/** + * Compute analysis result object from token/config history. + * Returns the result object (does not write to stdout). + */ +function computeAnalysis() { + const records = readJsonl(TOKEN_HISTORY_PATH); + const configRecords = readJsonl(CONFIG_HISTORY_PATH); + + if (records.length === 0) { + return { + cold_start: true, + cold_start_tier: "<7d", + message: "No token history data available", + efficiency_scorecard: { + cache_efficiency: { current: 0, trend_7d: 0, trend_30d: 0 }, + output_ratio: { current: 0, trend_7d: 0, trend_30d: 0 }, + wasted_context: { current: 0, trend_7d: 0, trend_30d: 0 }, + tokens_per_turn: { current: 0, trend_7d: 0, trend_30d: 0 }, + first_pass_rate: { current: 100, trend_7d: 100, trend_30d: 100 }, + failure_rate: { current: {}, trend_7d: {}, trend_30d: {} }, + }, + executive_summary: { + total_tokens: { value: 0 }, + total_stages: { value: 0 }, + unique_issues: { value: 0 }, + data_span_days: 0, + }, + per_stage_spend: {}, + per_stage_trend: {}, + per_ticket_trend: { median: 0, mean: 0, ticket_count: 0 }, + per_product: {}, + inflections: [], + outliers: [], + }; + } + + const spanDays = dataSpanDays(records); + const tier = coldStartTier(spanDays); + const now = new Date(); + + const isColdStart = spanDays < 7; + + const scorecard = computeScorecardWithTrends(records, now); + const executiveSummary = buildExecutiveSummary(records, spanDays, now); + const perStageSpend = computePerStageSpend(records); + const perStageTrend = computePerStageTrend(records, configRecords); + const perTicketTrend = computePerTicketTrend(records); + const perProduct = computePerProduct(records); + + // Inflection detection and outliers: only meaningful with sufficient data + let inflections = []; + let outliers = []; + + if (tier === ">=30d") { + inflections = detectInflections(records, 
configRecords, now); + outliers = detectOutliers(records); + } else if (tier === "7-29d") { + outliers = detectOutliers(records); + inflections = []; + } + + return { + ...(isColdStart && { cold_start: true }), + cold_start_tier: tier, + ...(isColdStart && { + message: + "insufficient data — rolling averages, inflection detection, and attribution require >=7 days", + }), + analyzed_at: now.toISOString(), + data_span_days: spanDays, + record_count: records.length, + efficiency_scorecard: scorecard, + executive_summary: executiveSummary, + per_stage_spend: perStageSpend, + per_stage_trend: perStageTrend, + per_ticket_trend: perTicketTrend, + per_product: perProduct, + inflections: + tier === "<7d" ? { status: "insufficient data", items: [] } : inflections, + outliers: + tier === "<7d" ? { status: "insufficient data", items: [] } : outliers, + }; +} + +/** + * Main analyze function — prints analysis JSON to stdout. + */ +function runAnalyze() { + const result = computeAnalysis(); + process.stdout.write(`${JSON.stringify(result, null, 2)}\n`); +} + +// --------------------------------------------------------------------------- +// Render subcommand — SYMPH-131 +// --------------------------------------------------------------------------- + +const REPORTS_DIR = join(SYMPHONY_HOME, "reports"); + +/** + * Format a number with thousands separators. + */ +function fmtNum(n) { + if (n == null || Number.isNaN(n)) return "0"; + return Math.round(n).toLocaleString("en-US"); +} + +/** + * Escape HTML special characters. + */ +function escHtml(s) { + if (!s) return ""; + return String(s) + .replace(/&/g, "&") + .replace(//g, ">") + .replace(/"/g, """); +} + +/** + * Generate an inline SVG sparkline from an array of {x, y} points. + * x values are normalized to [0, width], y values to [0, height]. 
+ */ +function sparklineSvg(values, opts = {}) { + const { + width = 120, + height = 30, + stroke = "#58a6ff", + strokeWidth = 1.5, + } = opts; + if (!values || values.length < 2) { + return ``; + } + const minY = Math.min(...values); + const maxY = Math.max(...values); + const rangeY = maxY - minY || 1; + const points = values + .map((v, i) => { + const x = (i / (values.length - 1)) * width; + const y = height - ((v - minY) / rangeY) * (height - 4) - 2; + return `${round(x, 1)},${round(y, 1)}`; + }) + .join(" "); + return ``; +} + +/** + * Build a multi-line SVG chart for per-stage trends. + */ +function multiLineSvg(stageData, configChanges, opts = {}) { + const { width = 600, height = 200 } = opts; + const colors = [ + "#58a6ff", + "#3fb950", + "#d29922", + "#f85149", + "#bc8cff", + "#79c0ff", + "#56d364", + "#e3b341", + ]; + const allDates = new Set(); + for (const stage of Object.keys(stageData)) { + for (const d of Object.keys(stageData[stage].daily_avg ?? {})) { + allDates.add(d); + } + } + const sortedDates = [...allDates].sort(); + if (sortedDates.length < 2) { + return `Insufficient data for trend chart`; + } + + const stages = Object.keys(stageData); + const allVals = []; + for (const stage of stages) { + const avg = stageData[stage].daily_avg ?? 
{}; + for (const d of sortedDates) { + if (avg[d] != null) allVals.push(avg[d]); + } + } + const minY = Math.min(...allVals, 0); + const maxY = Math.max(...allVals, 1); + const rangeY = maxY - minY || 1; + const padL = 50; + const padR = 10; + const padT = 10; + const padB = 25; + const chartW = width - padL - padR; + const chartH = height - padT - padB; + + let svg = ``; + + // Grid lines + for (let i = 0; i <= 4; i++) { + const y = padT + (chartH / 4) * i; + const val = maxY - (rangeY / 4) * i; + svg += ``; + svg += `${fmtNum(val)}`; + } + + // Config change markers + if (configChanges) { + for (const cc of configChanges) { + const idx = sortedDates.indexOf(cc.date); + if (idx >= 0) { + const x = padL + (idx / (sortedDates.length - 1)) * chartW; + svg += ``; + svg += ``; + } + } + } + + // Stage lines + stages.forEach((stage, si) => { + const avg = stageData[stage].daily_avg ?? {}; + const pts = []; + for (const d of sortedDates) { + if (avg[d] != null) { + const x = + padL + (sortedDates.indexOf(d) / (sortedDates.length - 1)) * chartW; + const y = padT + chartH - ((avg[d] - minY) / rangeY) * chartH; + pts.push(`${round(x, 1)},${round(y, 1)}`); + } + } + if (pts.length > 1) { + const color = colors[si % colors.length]; + svg += ``; + } + }); + + // Legend + stages.forEach((stage, si) => { + const x = padL + si * 100; + const color = colors[si % colors.length]; + svg += ``; + svg += `${escHtml(stage)}`; + }); + + svg += ""; + return svg; +} + +/** + * Format a WoW delta as colored text. + */ +function wowBadge(delta) { + if (delta == null) return ''; + const sign = delta > 0 ? "+" : ""; + const color = delta > 0 ? "#f85149" : delta < 0 ? "#3fb950" : "#8b949e"; + return `${sign}${delta}% WoW`; +} + +/** + * Render self-contained HTML report from analysis JSON. + */ +function renderHtml(analysis) { + const today = dateKey(new Date()); + const es = analysis.executive_summary ?? {}; + const sc = analysis.efficiency_scorecard ?? 
{}; + const perStageTrend = analysis.per_stage_trend ?? {}; + const perTicket = analysis.per_ticket_trend ?? {}; + const outliers = Array.isArray(analysis.outliers) + ? analysis.outliers + : (analysis.outliers?.items ?? []); + const perStageSpend = analysis.per_stage_spend ?? {}; + const perProduct = analysis.per_product ?? {}; + const inflections = Array.isArray(analysis.inflections) + ? analysis.inflections + : (analysis.inflections?.items ?? []); + + // Compute tokens-per-issue median and mean from records + const records = readJsonl(TOKEN_HISTORY_PATH); + const issueTokens = {}; + for (const r of records) { + const id = r.issue_identifier; + if (!id) continue; + issueTokens[id] = (issueTokens[id] ?? 0) + (r.total_total_tokens ?? 0); + } + const issueValues = Object.values(issueTokens); + const tokensPerIssueMedian = median(issueValues); + const tokensPerIssueMean = mean(issueValues); + + // Compute cache hit rate from efficiency scorecard + const cacheHitRate = sc.cache_efficiency?.current ?? 
0; + + // Build sparkline data for efficiency scorecard metrics (30-day daily) + function buildDailyMetricSeries(metricFn) { + const now = new Date(); + const vals = []; + for (let i = 29; i >= 0; i--) { + const dayStart = daysAgo(i + 1, now); + const dayEnd = daysAgo(i, now); + const dayRecords = filterByDateRange(records, dayStart, dayEnd); + if (dayRecords.length > 0) { + vals.push(metricFn(dayRecords)); + } + } + return vals; + } + + const cacheEffSeries = buildDailyMetricSeries((recs) => { + const sc2 = computeEfficiencyScorecard(recs); + return sc2.cache_efficiency; + }); + const outputRatioSeries = buildDailyMetricSeries((recs) => { + const sc2 = computeEfficiencyScorecard(recs); + return sc2.output_ratio; + }); + const wastedCtxSeries = buildDailyMetricSeries((recs) => { + const sc2 = computeEfficiencyScorecard(recs); + return sc2.wasted_context; + }); + const tokPerTurnSeries = buildDailyMetricSeries((recs) => { + const sc2 = computeEfficiencyScorecard(recs); + return sc2.tokens_per_turn; + }); + const firstPassSeries = buildDailyMetricSeries((recs) => { + const sc2 = computeEfficiencyScorecard(recs); + return sc2.first_pass_rate; + }); + const failureRateSeries = buildDailyMetricSeries((recs) => { + const total = recs.length; + const failed = recs.filter((r) => r.outcome === "failed").length; + return total > 0 ? (failed / total) * 100 : 0; + }); + + // Per-ticket trend sparkline (rolling median by date) + const perTicketSeries = (() => { + const byDate = {}; + for (const r of records) { + if (!r.issue_identifier) continue; + const dk = dateKey(parseTs(r.timestamp)); + if (!byDate[dk]) byDate[dk] = {}; + byDate[dk][r.issue_identifier] = + (byDate[dk][r.issue_identifier] ?? 0) + (r.total_total_tokens ?? 
0); + } + const sortedDates = Object.keys(byDate).sort(); + return sortedDates.map((d) => median(Object.values(byDate[d]))); + })(); + + // Config changes for multi-line chart + const firstStageKey = Object.keys(perStageTrend)[0]; + const configChanges = firstStageKey + ? (perStageTrend[firstStageKey].config_changes ?? []) + : []; + + // Build issue leaderboard sorted by spend + const leaderboard = Object.entries(issueTokens) + .map(([id, tokens]) => { + // Find title from records + const rec = records.find((r) => r.issue_identifier === id); + return { identifier: id, title: rec?.issue_title ?? "", tokens }; + }) + .sort((a, b) => b.tokens - a.tokens); + + // Per-stage sparklines + const stageSparklines = {}; + for (const stage of Object.keys(perStageTrend)) { + const dailyAvg = perStageTrend[stage].daily_avg ?? {}; + const sortedDates = Object.keys(dailyAvg).sort(); + stageSparklines[stage] = sortedDates.map((d) => dailyAvg[d]); + } + + // WoW deltas for KPIs + const tokensDelta = es.total_tokens?.wow_delta_pct; + const stagesDelta = es.total_stages?.wow_delta_pct; + + // Compute tokens-per-issue WoW delta + const tokPerIssueWow = (() => { + if (records.length === 0) return null; + const now = new Date(); + const d7 = daysAgo(7, now); + const d14 = daysAgo(14, now); + const curr = filterByDateRange(records, d7, now); + const prev = filterByDateRange(records, d14, d7); + if (curr.length === 0 || prev.length === 0) return null; + const currIssues = {}; + for (const r of curr) { + if (r.issue_identifier) + currIssues[r.issue_identifier] = + (currIssues[r.issue_identifier] ?? 0) + (r.total_total_tokens ?? 0); + } + const prevIssues = {}; + for (const r of prev) { + if (r.issue_identifier) + prevIssues[r.issue_identifier] = + (prevIssues[r.issue_identifier] ?? 0) + (r.total_total_tokens ?? 
0); + } + const currMedian = median(Object.values(currIssues)); + const prevMedian = median(Object.values(prevIssues)); + if (prevMedian === 0) return null; + return round(((currMedian - prevMedian) / prevMedian) * 100, 1); + })(); + + // Cache hit rate WoW delta + const cacheWow = (() => { + const now = new Date(); + const d7 = daysAgo(7, now); + const d14 = daysAgo(14, now); + const curr = filterByDateRange(records, d7, now); + const prev = filterByDateRange(records, d14, d7); + if (curr.length === 0 || prev.length === 0) return null; + const currSc = computeEfficiencyScorecard(curr); + const prevSc = computeEfficiencyScorecard(prev); + if (prevSc.cache_efficiency === 0) return null; + return round( + ((currSc.cache_efficiency - prevSc.cache_efficiency) / + prevSc.cache_efficiency) * + 100, + 1, + ); + })(); + + const html = ` + + + + +Symphony Token Report — ${escHtml(today)} + + + +

Symphony Token Report

+

Generated ${escHtml(today)} · ${fmtNum(analysis.record_count ?? 0)} records · ${analysis.data_span_days ?? 0} day span

+ + +

Executive Summary

+
+
+
Total Tokens
+
${fmtNum(es.total_tokens?.value)}
+
${wowBadge(tokensDelta)}
+
+
+
Tokens / Issue (median)
+
${fmtNum(tokensPerIssueMedian)}
+
mean: ${fmtNum(tokensPerIssueMean)} ${wowBadge(tokPerIssueWow)}
+
+
+
Issues Processed
+
${fmtNum(es.unique_issues?.value)}
+
+
+
Cache Hit Rate
+
${round(cacheHitRate, 1)}%
+
${wowBadge(cacheWow)}
+
+
+ + +

Efficiency Scorecard

+
+ Cache Efficiency + ${round(sc.cache_efficiency?.current ?? 0, 1)}% + ${sparklineSvg(cacheEffSeries)} +
+
+ Output Ratio + ${round(sc.output_ratio?.current ?? 0, 1)}% + ${sparklineSvg(outputRatioSeries, { stroke: "#3fb950" })} +
+
+ Wasted Context + ${round(sc.wasted_context?.current ?? 0, 1)}% + ${sparklineSvg(wastedCtxSeries, { stroke: "#d29922" })} +
+
+ Tokens / Turn + ${fmtNum(sc.tokens_per_turn?.current ?? 0)} + ${sparklineSvg(tokPerTurnSeries, { stroke: "#bc8cff" })} +
+
+ First-Pass Rate + ${round(sc.first_pass_rate?.current ?? 0, 1)}% + ${sparklineSvg(firstPassSeries, { stroke: "#56d364" })} +
+
+ Failure Rate (all stages) + ${(() => { + const fr = sc.failure_rate?.current ?? {}; + const rates = Object.values(fr); + return rates.length > 0 ? `${round(mean(rates), 1)}%` : "0%"; + })()} + ${sparklineSvg(failureRateSeries, { stroke: "#f85149" })} +
+ + +

Per-Stage Utilization Trend

+
+${multiLineSvg(perStageTrend, configChanges)} +${ + inflections.length > 0 + ? inflections + .map( + (inf) => ` +
+
⚡ Inflection: ${escHtml(inf.stage)} — ${escHtml(inf.direction)} ${inf.pct_change}%
+
7d avg: ${fmtNum(inf.avg_7d)} · 30d avg: ${fmtNum(inf.avg_30d)}${inf.attributions?.length > 0 ? ` · ${inf.attributions.map((a) => escHtml(a.description)).join("; ")}` : ""}
+
`, + ) + .join("") + : "" +} +
+ + +

Per-Ticket Cost Trend

+
+
Rolling median tokens per ticket · median: ${fmtNum(perTicket.median)} · mean: ${fmtNum(perTicket.mean)} · ${perTicket.ticket_count} tickets
+ ${sparklineSvg(perTicketSeries, { width: 580, height: 60, stroke: "#58a6ff", strokeWidth: 2 })} +
+ + +

Outlier Analysis

+${ + outliers.length === 0 + ? '

No outliers detected (>2σ threshold)

' + : outliers + .map( + (o) => ` +
+
${escHtml(o.issue_identifier)} — ${escHtml(o.issue_title)} — ${fmtNum(o.total_tokens)} tokens (z=${o.z_score})
+
${escHtml(o.hypothesis ?? "No hypothesis available")}
+ ${o.parent ? `
Parent: ${escHtml(o.parent.identifier)} (${escHtml(o.parent.complexity)}, ${o.parent.task_count} tasks)
` : ""} +
`, + ) + .join("") +} + + +

Issue Leaderboard

+ + + +${leaderboard + .slice(0, 25) + .map( + (item, i) => + ` `, + ) + .join("\n")} + +
#IssueTitleTokens
${i + 1}${escHtml(item.identifier)}${escHtml(item.title)}${fmtNum(item.tokens)}
+ + +

Stage Efficiency

+${Object.entries(perStageSpend) + .map( + ([stage, data]) => ` +
+
+ ${escHtml(stage)} + ${fmtNum(data.total_tokens)} tokens · ${data.count} runs · ${data.completed} ok · ${data.failed} fail +
+
+ 30d trend: + ${sparklineSvg(stageSparklines[stage] ?? [], { stroke: "#58a6ff" })} +
+
`, + ) + .join("")} + + +

Per-Product Breakdown

+ + + +${(() => { + const totalTokens = + Object.values(perProduct).reduce((s, p) => s + (p.total_tokens ?? 0), 0) || + 1; + return Object.entries(perProduct) + .sort((a, b) => (b[1].total_tokens ?? 0) - (a[1].total_tokens ?? 0)) + .map(([name, data]) => { + const pct = round((data.total_tokens / totalTokens) * 100, 1); + return ` `; + }) + .join("\n"); +})()} + +
ProductTokensStagesIssuesShare
${escHtml(name)}${fmtNum(data.total_tokens)}${data.total_stages}${data.unique_issues}
${pct}%
+ +
Symphony Token Report · Self-contained · Generated by token-report.mjs · SYMPH-131
+ +`; + + return html; +} + +/** + * Render subcommand: generate HTML report file. + */ +function runRender() { + const analysis = computeAnalysis(); + const html = renderHtml(analysis); + const today = dateKey(new Date()); + const outPath = join(REPORTS_DIR, `${today}.html`); + mkdirSync(REPORTS_DIR, { recursive: true }); + writeFileSync(outPath, html); + info(`Report written to ${outPath}`); +} + +// --------------------------------------------------------------------------- +// Slack subcommand — SYMPH-131 +// --------------------------------------------------------------------------- + +/** + * Post narrative Slack digest via Bot Token API (SYMPH-139). + * + * 9-section markdown digest with interpretive commentary. + * Set DRY_RUN=1 to log to stderr instead of posting. + */ +function runSlack() { + const botToken = process.env.SLACK_BOT_TOKEN; + if (!botToken) { + warn("SLACK_BOT_TOKEN not set — skipping Slack digest"); + return; + } + const channelId = process.env.SLACK_CHANNEL_ID || "C0ANRJRBYGL"; + + const analysis = computeAnalysis(); + const es = analysis.executive_summary ?? {}; + const sc = analysis.efficiency_scorecard ?? {}; + const outliers = Array.isArray(analysis.outliers) + ? analysis.outliers + : (analysis.outliers?.items ?? []); + const inflections = Array.isArray(analysis.inflections) + ? analysis.inflections + : (analysis.inflections?.items ?? []); + const perStageSpend = analysis.per_stage_spend ?? {}; + const perProduct = analysis.per_product ?? {}; + const perTicket = analysis.per_ticket_trend ?? {}; + + // Compute tokens-per-issue + const records = readJsonl(TOKEN_HISTORY_PATH); + const issueTokens = {}; + for (const r of records) { + if (r.issue_identifier) + issueTokens[r.issue_identifier] = + (issueTokens[r.issue_identifier] ?? 0) + (r.total_total_tokens ?? 
0); + } + const issueValues = Object.values(issueTokens); + const medianTPI = fmtNum(median(issueValues)); + const meanTPI = fmtNum(mean(issueValues)); + + // Top consumer + let topConsumer = "—"; + if (issueValues.length > 0) { + const sorted = Object.entries(issueTokens).sort((a, b) => b[1] - a[1]); + topConsumer = `${sorted[0][0]} (${fmtNum(sorted[0][1])})`; + } + + // Report link — always use BASE_URL; fall back to pro16.local:{port} + const reportPort = process.env.TOKEN_REPORT_PORT || "8090"; + const baseUrl = process.env.BASE_URL || `pro16.local:${reportPort}`; + const today = dateKey(new Date()); + const reportUrl = `http://${baseUrl}/${today}.html`; + + // --- Section 1: Title --- + const sections = []; + sections.push(`*🎵 Symphony Token Digest — ${today}*`); + + // --- Section 2: Executive Summary --- + const spanDays = analysis.data_span_days ?? 0; + const tier = analysis.cold_start_tier ?? "unknown"; + sections.push( + `*Executive Summary*\n> *${fmtNum(es.total_tokens?.value)}* tokens across *${fmtNum(es.unique_issues?.value)}* issues over *${spanDays}d* (tier: ${tier})\n> ${fmtNum(es.total_stages?.value ?? 0)} total stages completed`, + ); + + // --- Section 3: Tokens per Issue --- + sections.push( + `*Tokens per Issue*\n> Median: *${medianTPI}* · Mean: *${meanTPI}* · Issues tracked: *${issueValues.length}*\n> Top consumer: *${topConsumer}*\n${ + perTicket.ticket_count > 0 + ? `> Rolling trend — median: ${fmtNum(perTicket.median)}, mean: ${fmtNum(perTicket.mean)}` + : "> _No rolling trend data yet_" + }`, + ); + + // --- Section 4: Efficiency Scorecard --- + const cacheEff = round(sc.cache_efficiency?.current ?? 0, 1); + const cacheTrend7d = round(sc.cache_efficiency?.trend_7d ?? 0, 1); + const outputRatio = round(sc.output_ratio?.current ?? 0, 1); + const firstPass = round(sc.first_pass_rate?.current ?? 0, 1); + const tokPerTurn = fmtNum(sc.tokens_per_turn?.current ?? 0); + const wastedCtx = round(sc.wasted_context?.current ?? 
0, 1); + sections.push( + `*Efficiency Scorecard*\n> Cache hit rate: *${cacheEff}%* (7d trend: ${cacheTrend7d >= 0 ? "+" : ""}${cacheTrend7d}%)\n> Output ratio: *${outputRatio}%* · First-pass success: *${firstPass}%*\n> Tokens/turn: *${tokPerTurn}* · Wasted context: *${wastedCtx}%*`, + ); + + // --- Section 5: Per-Stage Spend --- + const stageEntries = Object.entries(perStageSpend); + if (stageEntries.length > 0) { + const stageLines = stageEntries + .sort((a, b) => (b[1]?.total ?? 0) - (a[1]?.total ?? 0)) + .slice(0, 5) + .map( + ([stage, data]) => + `> • ${stage}: *${fmtNum(data?.total ?? 0)}* tokens (${fmtNum(data?.count ?? 0)} stages)`, + ); + sections.push(`*Per-Stage Spend (top 5)*\n${stageLines.join("\n")}`); + } else { + sections.push("*Per-Stage Spend*\n> _No stage data available_"); + } + + // --- Section 6: Per-Product Breakdown --- + const productEntries = Object.entries(perProduct); + if (productEntries.length > 0) { + const productLines = productEntries + .sort((a, b) => (b[1]?.total_tokens ?? 0) - (a[1]?.total_tokens ?? 0)) + .slice(0, 5) + .map( + ([product, data]) => + `> • ${product}: *${fmtNum(data?.total_tokens ?? 0)}* tokens (${fmtNum(data?.stage_count ?? 0)} stages)`, + ); + sections.push( + `*Per-Product Breakdown (top 5)*\n${productLines.join("\n")}`, + ); + } else { + sections.push("*Per-Product Breakdown*\n> _No product data available_"); + } + + // --- Section 7: Outliers --- + if (outliers.length > 0) { + const outlierLines = outliers + .slice(0, 5) + .map( + (o) => + `> • ⚠️ ${o.issue_identifier}: *${fmtNum(o.total_tokens)}* tokens (z=${round(o.z_score, 2)})${o.hypothesis ? 
` — ${o.hypothesis}` : ""}`, + ); + sections.push( + `*Outliers* (>${"2σ"} from mean)\n${outlierLines.join("\n")}`, + ); + } else { + sections.push( + "*Outliers*\n> ✅ No outliers detected — all issues within 2σ of mean", + ); + } + + // --- Section 8: Inflections --- + if (inflections.length > 0) { + const inflectionLines = inflections + .slice(0, 5) + .map( + (inf) => + `> • ⚡ ${inf.stage}: ${inf.direction} *${round(inf.pct_change, 1)}%* (7d avg crossed 30d avg)`, + ); + sections.push(`*Trend Inflections*\n${inflectionLines.join("\n")}`); + } else { + sections.push( + `*Trend Inflections*\n> _No inflection points detected${spanDays < 30 ? " (requires ≥30d of data)" : ""}_`, + ); + } + + // --- Section 9: Report Link --- + sections.push(`📊 <${reportUrl}|View full HTML report>`); + + const message = sections.join("\n\n"); + + // DRY_RUN support: log to stderr instead of posting + if (process.env.DRY_RUN) { + process.stderr.write(`[DRY_RUN] Slack digest message:\n${message}\n`); + info("DRY_RUN set — Slack digest logged to stderr, not posted"); + return; + } + + const payload = JSON.stringify({ channel: channelId, text: message }); + + try { + const response = execFileSync( + "curl", + [ + "-s", + "-X", + "POST", + "https://slack.com/api/chat.postMessage", + "-H", + `Authorization: Bearer ${botToken}`, + "-H", + "Content-type: application/json; charset=utf-8", + "-d", + payload, + ], + { + encoding: "utf-8", + timeout: 30000, + stdio: ["pipe", "pipe", "pipe"], + }, + ); + let parsed; + try { + parsed = JSON.parse(response); + } catch { + warn(`Slack post returned non-JSON response: ${response.slice(0, 200)}`); + return; + } + if (parsed.ok) { + info("Slack digest posted"); + } else { + warn(`Slack API error: ${parsed.error ?? 
"unknown"}`); + } + } catch (err) { + warn(`Slack post failed: ${err.message}`); + // Graceful degradation: don't throw + } +} + +// --------------------------------------------------------------------------- +// Rotate subcommand — SYMPH-131 +// --------------------------------------------------------------------------- + +import { + createReadStream, + createWriteStream, + unlinkSync, + utimesSync, +} from "node:fs"; +import { pipeline } from "node:stream/promises"; +import { createGzip } from "node:zlib"; + +/** + * Log rotation: compress/delete old JSONL logs and HTML reports. + * + * Raw JSONL: compress >7d, delete >14d, skip mtime <2h + * HTML reports: delete >90d + * + * Replaces com.symphony.newsyslog.conf for symphony logs. + */ +async function runRotate() { + const now = Date.now(); + const TWO_HOURS_MS = 2 * 60 * 60 * 1000; + const SEVEN_DAYS_MS = 7 * 24 * 60 * 60 * 1000; + const FOURTEEN_DAYS_MS = 14 * 24 * 60 * 60 * 1000; + const NINETY_DAYS_MS = 90 * 24 * 60 * 60 * 1000; + + // Rotate JSONL files in data dir + if (existsSync(DATA_DIR)) { + const files = readdirSync(DATA_DIR); + for (const file of files) { + const filePath = join(DATA_DIR, file); + let st; + try { + st = statSync(filePath); + } catch { + continue; + } + if (!st.isFile()) continue; + + const age = now - st.mtimeMs; + + // Safety: never touch files modified less than 2 hours ago + if (age < TWO_HOURS_MS) continue; + + // Delete compressed files older than 14 days + if (file.endsWith(".jsonl.gz") && age > FOURTEEN_DAYS_MS) { + info(`Deleting old compressed log: ${file}`); + unlinkSync(filePath); + continue; + } + + // Compress JSONL files older than 7 days + if (file.endsWith(".jsonl") && age > SEVEN_DAYS_MS) { + info(`Compressing old log: ${file}`); + const gzPath = `${filePath}.gz`; + try { + await pipeline( + createReadStream(filePath), + createGzip(), + createWriteStream(gzPath), + ); + // Preserve mtime on compressed file + utimesSync(gzPath, st.atime, st.mtime); + 
unlinkSync(filePath); + } catch (err) { + warn(`Failed to compress ${file}: ${err.message}`); + } + } + } + } + + // Delete old HTML reports + if (existsSync(REPORTS_DIR)) { + const files = readdirSync(REPORTS_DIR); + for (const file of files) { + if (!file.endsWith(".html")) continue; + const filePath = join(REPORTS_DIR, file); + let st; + try { + st = statSync(filePath); + } catch { + continue; + } + if (!st.isFile()) continue; + const age = now - st.mtimeMs; + if (age > NINETY_DAYS_MS) { + info(`Deleting old report: ${file}`); + unlinkSync(filePath); + } + } + } + + info("Log rotation complete"); +} + +// --------------------------------------------------------------------------- +// Main +// --------------------------------------------------------------------------- + +function ensureDirs(...dirs) { + for (const dir of dirs) { + mkdirSync(dir, { recursive: true }); + } +} + +const subcommand = process.argv[2]; + +if (!subcommand || subcommand === "extract") { + ensureDirs( + DATA_DIR, + HWM_DIR, + LINEAR_CACHE_DIR, + join(SYMPHONY_HOME, "logs"), + REPORTS_DIR, + ); + runExtract(); +} else if (subcommand === "analyze") { + ensureDirs(DATA_DIR, LINEAR_CACHE_DIR); + runAnalyze(); +} else if (subcommand === "render") { + ensureDirs(DATA_DIR, LINEAR_CACHE_DIR, REPORTS_DIR); + runRender(); +} else if (subcommand === "slack") { + ensureDirs(DATA_DIR, LINEAR_CACHE_DIR); + runSlack(); +} else if (subcommand === "rotate") { + runRotate().catch((err) => { + process.stderr.write(`ERROR: rotate failed: ${err.message}\n`); + process.exit(1); + }); +} else { + process.stderr.write( + `Unknown subcommand: ${subcommand}\nUsage: token-report.mjs [extract|analyze|render|slack|rotate]\n`, + ); + process.exit(1); +} diff --git a/ops/token-report.sh b/ops/token-report.sh new file mode 100755 index 00000000..6da6c100 --- /dev/null +++ b/ops/token-report.sh @@ -0,0 +1,127 @@ +#!/usr/bin/env bash +set -euo pipefail + +# token-report.sh — Wrapper for token history extraction, analysis, 
reporting, and rotation +# +# Responsibilities: +# - Validate/set default env vars (SYMPHONY_HOME, SYMPHONY_LOG_DIR) +# - Create directory tree +# - Acquire lockfile via shlock (concurrent execution guard) +# - Route to node ops/token-report.mjs +# - Orchestrate daily pipeline (extract → analyze → render → slack → rotate) +# - Release lockfile via trap +# +# Usage: token-report.sh [extract|analyze|render|slack|rotate|daily] +# +# SYMPH-129, SYMPH-131 + +SCRIPT_DIR="$(cd "$(dirname "$(realpath "${BASH_SOURCE[0]}")")" && pwd)" +SYMPHONY_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)" + +# --------------------------------------------------------------------------- +# Environment defaults +# --------------------------------------------------------------------------- + +export SYMPHONY_HOME="${SYMPHONY_HOME:-$HOME/.symphony}" +export SYMPHONY_LOG_DIR="${SYMPHONY_LOG_DIR:-$HOME/Library/Logs/symphony}" + +# --------------------------------------------------------------------------- +# Directory tree creation +# --------------------------------------------------------------------------- + +mkdir -p "$SYMPHONY_HOME"/{data/.hwm,data/linear-cache,logs,reports} + +# --------------------------------------------------------------------------- +# Lockfile management +# --------------------------------------------------------------------------- + +LOCKFILE="$SYMPHONY_HOME/data/.lock" + +cleanup_lock() { + rm -f "$LOCKFILE" +} + +acquire_lock() { + if command -v shlock >/dev/null 2>&1; then + if ! shlock -p $$ -f "$LOCKFILE"; then + echo "Another instance is running, skipping" >&2 + exit 0 + fi + else + # Fallback: simple mkdir-based lock for systems without shlock + if ! 
mkdir "$LOCKFILE.d" 2>/dev/null; then
      echo "Another instance is running, skipping" >&2
      exit 0
    fi
    # Override cleanup to remove directory lock
    cleanup_lock() {
      rm -f "$LOCKFILE"
      rmdir "$LOCKFILE.d" 2>/dev/null || true
    }
  fi
  trap cleanup_lock EXIT INT TERM
}

# ---------------------------------------------------------------------------
# Subcommand routing
# ---------------------------------------------------------------------------

SUBCOMMAND="${1:-extract}"
# FIX: use POSIX `command -v` instead of the non-standard `which`
# (ShellCheck SC2230); falls back to the Homebrew path when node is not
# on PATH (e.g. under launchd's minimal environment).
NODE_BIN="${SYMPHONY_NODE:-$(command -v node 2>/dev/null || echo /opt/homebrew/bin/node)}"

case "$SUBCOMMAND" in
  extract)
    acquire_lock
    "$NODE_BIN" "$SCRIPT_DIR/token-report.mjs" extract
    ;;
  analyze)
    acquire_lock
    "$NODE_BIN" "$SCRIPT_DIR/token-report.mjs" analyze
    ;;
  render)
    acquire_lock
    "$NODE_BIN" "$SCRIPT_DIR/token-report.mjs" render
    ;;
  slack)
    "$NODE_BIN" "$SCRIPT_DIR/token-report.mjs" slack
    ;;
  rotate)
    "$NODE_BIN" "$SCRIPT_DIR/token-report.mjs" rotate
    ;;
  daily)
    acquire_lock

    # Daily pipeline: extract → analyze → render → slack → rotate
    # If extract/analyze/render fail → skip subsequent, exit non-zero
    # Slack failure → log warning, continue to rotate (graceful degradation)
    # Rotate failure → log warning, exit non-zero

    echo "INFO: Starting daily pipeline" >&2

    "$NODE_BIN" "$SCRIPT_DIR/token-report.mjs" extract
    echo "INFO: extract complete" >&2

    "$NODE_BIN" "$SCRIPT_DIR/token-report.mjs" analyze > /dev/null
    echo "INFO: analyze complete" >&2

    "$NODE_BIN" "$SCRIPT_DIR/token-report.mjs" render
    echo "INFO: render complete" >&2

    # Slack: graceful degradation — failure logs warning but continues
    if ! "$NODE_BIN" "$SCRIPT_DIR/token-report.mjs" slack; then
      echo "WARN: Slack step failed, continuing to rotate" >&2
    fi

    # Rotate: failure is non-zero exit
    if !
"$NODE_BIN" "$SCRIPT_DIR/token-report.mjs" rotate; then + echo "WARN: Rotate step failed" >&2 + exit 1 + fi + + echo "INFO: Daily pipeline complete" >&2 + ;; + *) + echo "Usage: token-report.sh [extract|analyze|render|slack|rotate|daily]" >&2 + exit 1 + ;; +esac diff --git a/package-lock.json b/package-lock.json new file mode 100644 index 00000000..4ae1d980 --- /dev/null +++ b/package-lock.json @@ -0,0 +1,1859 @@ +{ + "name": "symphony-ts", + "version": "0.1.8", + "lockfileVersion": 3, + "requires": true, + "packages": { + "": { + "name": "symphony-ts", + "version": "0.1.8", + "license": "Apache-2.0", + "dependencies": { + "graphql": "^16.13.1", + "liquidjs": "^10.24.0", + "yaml": "^2.8.2", + "zod": "^4.3.6" + }, + "bin": { + "symphony": "dist/src/cli/main.js" + }, + "devDependencies": { + "@biomejs/biome": "^1.9.4", + "@types/node": "^22.13.14", + "typescript": "^5.8.2", + "vitest": "^3.0.8" + }, + "engines": { + "node": ">=22.0.0", + "pnpm": ">=10.0.0" + } + }, + "node_modules/@biomejs/biome": { + "version": "1.9.4", + "resolved": "https://registry.npmjs.org/@biomejs/biome/-/biome-1.9.4.tgz", + "integrity": "sha512-1rkd7G70+o9KkTn5KLmDYXihGoTaIGO9PIIN2ZB7UJxFrWw04CZHPYiMRjYsaDvVV7hP1dYNRLxSANLaBFGpog==", + "dev": true, + "hasInstallScript": true, + "license": "MIT OR Apache-2.0", + "bin": { + "biome": "bin/biome" + }, + "engines": { + "node": ">=14.21.3" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/biome" + }, + "optionalDependencies": { + "@biomejs/cli-darwin-arm64": "1.9.4", + "@biomejs/cli-darwin-x64": "1.9.4", + "@biomejs/cli-linux-arm64": "1.9.4", + "@biomejs/cli-linux-arm64-musl": "1.9.4", + "@biomejs/cli-linux-x64": "1.9.4", + "@biomejs/cli-linux-x64-musl": "1.9.4", + "@biomejs/cli-win32-arm64": "1.9.4", + "@biomejs/cli-win32-x64": "1.9.4" + } + }, + "node_modules/@biomejs/cli-darwin-arm64": { + "version": "1.9.4", + "resolved": "https://registry.npmjs.org/@biomejs/cli-darwin-arm64/-/cli-darwin-arm64-1.9.4.tgz", + 
"integrity": "sha512-bFBsPWrNvkdKrNCYeAp+xo2HecOGPAy9WyNyB/jKnnedgzl4W4Hb9ZMzYNbf8dMCGmUdSavlYHiR01QaYR58cw==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT OR Apache-2.0", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=14.21.3" + } + }, + "node_modules/@biomejs/cli-darwin-x64": { + "version": "1.9.4", + "resolved": "https://registry.npmjs.org/@biomejs/cli-darwin-x64/-/cli-darwin-x64-1.9.4.tgz", + "integrity": "sha512-ngYBh/+bEedqkSevPVhLP4QfVPCpb+4BBe2p7Xs32dBgs7rh9nY2AIYUL6BgLw1JVXV8GlpKmb/hNiuIxfPfZg==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT OR Apache-2.0", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=14.21.3" + } + }, + "node_modules/@biomejs/cli-linux-arm64": { + "version": "1.9.4", + "resolved": "https://registry.npmjs.org/@biomejs/cli-linux-arm64/-/cli-linux-arm64-1.9.4.tgz", + "integrity": "sha512-fJIW0+LYujdjUgJJuwesP4EjIBl/N/TcOX3IvIHJQNsAqvV2CHIogsmA94BPG6jZATS4Hi+xv4SkBBQSt1N4/g==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT OR Apache-2.0", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=14.21.3" + } + }, + "node_modules/@biomejs/cli-linux-arm64-musl": { + "version": "1.9.4", + "resolved": "https://registry.npmjs.org/@biomejs/cli-linux-arm64-musl/-/cli-linux-arm64-musl-1.9.4.tgz", + "integrity": "sha512-v665Ct9WCRjGa8+kTr0CzApU0+XXtRgwmzIf1SeKSGAv+2scAlW6JR5PMFo6FzqqZ64Po79cKODKf3/AAmECqA==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT OR Apache-2.0", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=14.21.3" + } + }, + "node_modules/@biomejs/cli-linux-x64": { + "version": "1.9.4", + "resolved": "https://registry.npmjs.org/@biomejs/cli-linux-x64/-/cli-linux-x64-1.9.4.tgz", + "integrity": "sha512-lRCJv/Vi3Vlwmbd6K+oQ0KhLHMAysN8lXoCI7XeHlxaajk06u7G+UsFSO01NAs5iYuWKmVZjmiOzJ0OJmGsMwg==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT OR Apache-2.0", + "optional": true, + "os": [ + 
"linux" + ], + "engines": { + "node": ">=14.21.3" + } + }, + "node_modules/@biomejs/cli-linux-x64-musl": { + "version": "1.9.4", + "resolved": "https://registry.npmjs.org/@biomejs/cli-linux-x64-musl/-/cli-linux-x64-musl-1.9.4.tgz", + "integrity": "sha512-gEhi/jSBhZ2m6wjV530Yy8+fNqG8PAinM3oV7CyO+6c3CEh16Eizm21uHVsyVBEB6RIM8JHIl6AGYCv6Q6Q9Tg==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT OR Apache-2.0", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=14.21.3" + } + }, + "node_modules/@biomejs/cli-win32-arm64": { + "version": "1.9.4", + "resolved": "https://registry.npmjs.org/@biomejs/cli-win32-arm64/-/cli-win32-arm64-1.9.4.tgz", + "integrity": "sha512-tlbhLk+WXZmgwoIKwHIHEBZUwxml7bRJgk0X2sPyNR3S93cdRq6XulAZRQJ17FYGGzWne0fgrXBKpl7l4M87Hg==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT OR Apache-2.0", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=14.21.3" + } + }, + "node_modules/@biomejs/cli-win32-x64": { + "version": "1.9.4", + "resolved": "https://registry.npmjs.org/@biomejs/cli-win32-x64/-/cli-win32-x64-1.9.4.tgz", + "integrity": "sha512-8Y5wMhVIPaWe6jw2H+KlEm4wP/f7EW3810ZLmDlrEEy5KvBsb9ECEfu/kMWD484ijfQ8+nIi0giMgu9g1UAuuA==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT OR Apache-2.0", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=14.21.3" + } + }, + "node_modules/@esbuild/aix-ppc64": { + "version": "0.27.4", + "resolved": "https://registry.npmjs.org/@esbuild/aix-ppc64/-/aix-ppc64-0.27.4.tgz", + "integrity": "sha512-cQPwL2mp2nSmHHJlCyoXgHGhbEPMrEEU5xhkcy3Hs/O7nGZqEpZ2sUtLaL9MORLtDfRvVl2/3PAuEkYZH0Ty8Q==", + "cpu": [ + "ppc64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "aix" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/android-arm": { + "version": "0.27.4", + "resolved": "https://registry.npmjs.org/@esbuild/android-arm/-/android-arm-0.27.4.tgz", + "integrity": 
"sha512-X9bUgvxiC8CHAGKYufLIHGXPJWnr0OCdR0anD2e21vdvgCI8lIfqFbnoeOz7lBjdrAGUhqLZLcQo6MLhTO2DKQ==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/android-arm64": { + "version": "0.27.4", + "resolved": "https://registry.npmjs.org/@esbuild/android-arm64/-/android-arm64-0.27.4.tgz", + "integrity": "sha512-gdLscB7v75wRfu7QSm/zg6Rx29VLdy9eTr2t44sfTW7CxwAtQghZ4ZnqHk3/ogz7xao0QAgrkradbBzcqFPasw==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/android-x64": { + "version": "0.27.4", + "resolved": "https://registry.npmjs.org/@esbuild/android-x64/-/android-x64-0.27.4.tgz", + "integrity": "sha512-PzPFnBNVF292sfpfhiyiXCGSn9HZg5BcAz+ivBuSsl6Rk4ga1oEXAamhOXRFyMcjwr2DVtm40G65N3GLeH1Lvw==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/darwin-arm64": { + "version": "0.27.4", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-arm64/-/darwin-arm64-0.27.4.tgz", + "integrity": "sha512-b7xaGIwdJlht8ZFCvMkpDN6uiSmnxxK56N2GDTMYPr2/gzvfdQN8rTfBsvVKmIVY/X7EM+/hJKEIbbHs9oA4tQ==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/darwin-x64": { + "version": "0.27.4", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-x64/-/darwin-x64-0.27.4.tgz", + "integrity": "sha512-sR+OiKLwd15nmCdqpXMnuJ9W2kpy0KigzqScqHI3Hqwr7IXxBp3Yva+yJwoqh7rE8V77tdoheRYataNKL4QrPw==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/freebsd-arm64": { + "version": "0.27.4", + "resolved": 
"https://registry.npmjs.org/@esbuild/freebsd-arm64/-/freebsd-arm64-0.27.4.tgz", + "integrity": "sha512-jnfpKe+p79tCnm4GVav68A7tUFeKQwQyLgESwEAUzyxk/TJr4QdGog9sqWNcUbr/bZt/O/HXouspuQDd9JxFSw==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/freebsd-x64": { + "version": "0.27.4", + "resolved": "https://registry.npmjs.org/@esbuild/freebsd-x64/-/freebsd-x64-0.27.4.tgz", + "integrity": "sha512-2kb4ceA/CpfUrIcTUl1wrP/9ad9Atrp5J94Lq69w7UwOMolPIGrfLSvAKJp0RTvkPPyn6CIWrNy13kyLikZRZQ==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-arm": { + "version": "0.27.4", + "resolved": "https://registry.npmjs.org/@esbuild/linux-arm/-/linux-arm-0.27.4.tgz", + "integrity": "sha512-aBYgcIxX/wd5n2ys0yESGeYMGF+pv6g0DhZr3G1ZG4jMfruU9Tl1i2Z+Wnj9/KjGz1lTLCcorqE2viePZqj4Eg==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-arm64": { + "version": "0.27.4", + "resolved": "https://registry.npmjs.org/@esbuild/linux-arm64/-/linux-arm64-0.27.4.tgz", + "integrity": "sha512-7nQOttdzVGth1iz57kxg9uCz57dxQLHWxopL6mYuYthohPKEK0vU0C3O21CcBK6KDlkYVcnDXY099HcCDXd9dA==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-ia32": { + "version": "0.27.4", + "resolved": "https://registry.npmjs.org/@esbuild/linux-ia32/-/linux-ia32-0.27.4.tgz", + "integrity": "sha512-oPtixtAIzgvzYcKBQM/qZ3R+9TEUd1aNJQu0HhGyqtx6oS7qTpvjheIWBbes4+qu1bNlo2V4cbkISr8q6gRBFA==", + "cpu": [ + "ia32" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + 
"node_modules/@esbuild/linux-loong64": { + "version": "0.27.4", + "resolved": "https://registry.npmjs.org/@esbuild/linux-loong64/-/linux-loong64-0.27.4.tgz", + "integrity": "sha512-8mL/vh8qeCoRcFH2nM8wm5uJP+ZcVYGGayMavi8GmRJjuI3g1v6Z7Ni0JJKAJW+m0EtUuARb6Lmp4hMjzCBWzA==", + "cpu": [ + "loong64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-mips64el": { + "version": "0.27.4", + "resolved": "https://registry.npmjs.org/@esbuild/linux-mips64el/-/linux-mips64el-0.27.4.tgz", + "integrity": "sha512-1RdrWFFiiLIW7LQq9Q2NES+HiD4NyT8Itj9AUeCl0IVCA459WnPhREKgwrpaIfTOe+/2rdntisegiPWn/r/aAw==", + "cpu": [ + "mips64el" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-ppc64": { + "version": "0.27.4", + "resolved": "https://registry.npmjs.org/@esbuild/linux-ppc64/-/linux-ppc64-0.27.4.tgz", + "integrity": "sha512-tLCwNG47l3sd9lpfyx9LAGEGItCUeRCWeAx6x2Jmbav65nAwoPXfewtAdtbtit/pJFLUWOhpv0FpS6GQAmPrHA==", + "cpu": [ + "ppc64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-riscv64": { + "version": "0.27.4", + "resolved": "https://registry.npmjs.org/@esbuild/linux-riscv64/-/linux-riscv64-0.27.4.tgz", + "integrity": "sha512-BnASypppbUWyqjd1KIpU4AUBiIhVr6YlHx/cnPgqEkNoVOhHg+YiSVxM1RLfiy4t9cAulbRGTNCKOcqHrEQLIw==", + "cpu": [ + "riscv64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-s390x": { + "version": "0.27.4", + "resolved": "https://registry.npmjs.org/@esbuild/linux-s390x/-/linux-s390x-0.27.4.tgz", + "integrity": "sha512-+eUqgb/Z7vxVLezG8bVB9SfBie89gMueS+I0xYh2tJdw3vqA/0ImZJ2ROeWwVJN59ihBeZ7Tu92dF/5dy5FttA==", + "cpu": [ + "s390x" + ], + "dev": true, + 
"license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-x64": { + "version": "0.27.4", + "resolved": "https://registry.npmjs.org/@esbuild/linux-x64/-/linux-x64-0.27.4.tgz", + "integrity": "sha512-S5qOXrKV8BQEzJPVxAwnryi2+Iq5pB40gTEIT69BQONqR7JH1EPIcQ/Uiv9mCnn05jff9umq/5nqzxlqTOg9NA==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/netbsd-arm64": { + "version": "0.27.4", + "resolved": "https://registry.npmjs.org/@esbuild/netbsd-arm64/-/netbsd-arm64-0.27.4.tgz", + "integrity": "sha512-xHT8X4sb0GS8qTqiwzHqpY00C95DPAq7nAwX35Ie/s+LO9830hrMd3oX0ZMKLvy7vsonee73x0lmcdOVXFzd6Q==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "netbsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/netbsd-x64": { + "version": "0.27.4", + "resolved": "https://registry.npmjs.org/@esbuild/netbsd-x64/-/netbsd-x64-0.27.4.tgz", + "integrity": "sha512-RugOvOdXfdyi5Tyv40kgQnI0byv66BFgAqjdgtAKqHoZTbTF2QqfQrFwa7cHEORJf6X2ht+l9ABLMP0dnKYsgg==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "netbsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/openbsd-arm64": { + "version": "0.27.4", + "resolved": "https://registry.npmjs.org/@esbuild/openbsd-arm64/-/openbsd-arm64-0.27.4.tgz", + "integrity": "sha512-2MyL3IAaTX+1/qP0O1SwskwcwCoOI4kV2IBX1xYnDDqthmq5ArrW94qSIKCAuRraMgPOmG0RDTA74mzYNQA9ow==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "openbsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/openbsd-x64": { + "version": "0.27.4", + "resolved": "https://registry.npmjs.org/@esbuild/openbsd-x64/-/openbsd-x64-0.27.4.tgz", + "integrity": 
"sha512-u8fg/jQ5aQDfsnIV6+KwLOf1CmJnfu1ShpwqdwC0uA7ZPwFws55Ngc12vBdeUdnuWoQYx/SOQLGDcdlfXhYmXQ==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "openbsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/openharmony-arm64": { + "version": "0.27.4", + "resolved": "https://registry.npmjs.org/@esbuild/openharmony-arm64/-/openharmony-arm64-0.27.4.tgz", + "integrity": "sha512-JkTZrl6VbyO8lDQO3yv26nNr2RM2yZzNrNHEsj9bm6dOwwu9OYN28CjzZkH57bh4w0I2F7IodpQvUAEd1mbWXg==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "openharmony" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/sunos-x64": { + "version": "0.27.4", + "resolved": "https://registry.npmjs.org/@esbuild/sunos-x64/-/sunos-x64-0.27.4.tgz", + "integrity": "sha512-/gOzgaewZJfeJTlsWhvUEmUG4tWEY2Spp5M20INYRg2ZKl9QPO3QEEgPeRtLjEWSW8FilRNacPOg8R1uaYkA6g==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "sunos" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/win32-arm64": { + "version": "0.27.4", + "resolved": "https://registry.npmjs.org/@esbuild/win32-arm64/-/win32-arm64-0.27.4.tgz", + "integrity": "sha512-Z9SExBg2y32smoDQdf1HRwHRt6vAHLXcxD2uGgO/v2jK7Y718Ix4ndsbNMU/+1Qiem9OiOdaqitioZwxivhXYg==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/win32-ia32": { + "version": "0.27.4", + "resolved": "https://registry.npmjs.org/@esbuild/win32-ia32/-/win32-ia32-0.27.4.tgz", + "integrity": "sha512-DAyGLS0Jz5G5iixEbMHi5KdiApqHBWMGzTtMiJ72ZOLhbu/bzxgAe8Ue8CTS3n3HbIUHQz/L51yMdGMeoxXNJw==", + "cpu": [ + "ia32" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/win32-x64": { + "version": "0.27.4", + "resolved": 
"https://registry.npmjs.org/@esbuild/win32-x64/-/win32-x64-0.27.4.tgz", + "integrity": "sha512-+knoa0BDoeXgkNvvV1vvbZX4+hizelrkwmGJBdT17t8FNPwG2lKemmuMZlmaNQ3ws3DKKCxpb4zRZEIp3UxFCg==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@jridgewell/sourcemap-codec": { + "version": "1.5.5", + "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.5.5.tgz", + "integrity": "sha512-cYQ9310grqxueWbl+WuIUIaiUaDcj7WOq5fVhEljNVgRfOUhY9fy2zTvfoqWsnebh8Sl70VScFbICvJnLKB0Og==", + "dev": true, + "license": "MIT" + }, + "node_modules/@rollup/rollup-android-arm-eabi": { + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm-eabi/-/rollup-android-arm-eabi-4.59.0.tgz", + "integrity": "sha512-upnNBkA6ZH2VKGcBj9Fyl9IGNPULcjXRlg0LLeaioQWueH30p6IXtJEbKAgvyv+mJaMxSm1l6xwDXYjpEMiLMg==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ] + }, + "node_modules/@rollup/rollup-android-arm64": { + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm64/-/rollup-android-arm64-4.59.0.tgz", + "integrity": "sha512-hZ+Zxj3SySm4A/DylsDKZAeVg0mvi++0PYVceVyX7hemkw7OreKdCvW2oQ3T1FMZvCaQXqOTHb8qmBShoqk69Q==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ] + }, + "node_modules/@rollup/rollup-darwin-arm64": { + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-arm64/-/rollup-darwin-arm64-4.59.0.tgz", + "integrity": "sha512-W2Psnbh1J8ZJw0xKAd8zdNgF9HRLkdWwwdWqubSVk0pUuQkoHnv7rx4GiF9rT4t5DIZGAsConRE3AxCdJ4m8rg==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ] + }, + "node_modules/@rollup/rollup-darwin-x64": { + "version": "4.59.0", + "resolved": 
"https://registry.npmjs.org/@rollup/rollup-darwin-x64/-/rollup-darwin-x64-4.59.0.tgz", + "integrity": "sha512-ZW2KkwlS4lwTv7ZVsYDiARfFCnSGhzYPdiOU4IM2fDbL+QGlyAbjgSFuqNRbSthybLbIJ915UtZBtmuLrQAT/w==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ] + }, + "node_modules/@rollup/rollup-freebsd-arm64": { + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-arm64/-/rollup-freebsd-arm64-4.59.0.tgz", + "integrity": "sha512-EsKaJ5ytAu9jI3lonzn3BgG8iRBjV4LxZexygcQbpiU0wU0ATxhNVEpXKfUa0pS05gTcSDMKpn3Sx+QB9RlTTA==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ] + }, + "node_modules/@rollup/rollup-freebsd-x64": { + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-x64/-/rollup-freebsd-x64-4.59.0.tgz", + "integrity": "sha512-d3DuZi2KzTMjImrxoHIAODUZYoUUMsuUiY4SRRcJy6NJoZ6iIqWnJu9IScV9jXysyGMVuW+KNzZvBLOcpdl3Vg==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ] + }, + "node_modules/@rollup/rollup-linux-arm-gnueabihf": { + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-gnueabihf/-/rollup-linux-arm-gnueabihf-4.59.0.tgz", + "integrity": "sha512-t4ONHboXi/3E0rT6OZl1pKbl2Vgxf9vJfWgmUoCEVQVxhW6Cw/c8I6hbbu7DAvgp82RKiH7TpLwxnJeKv2pbsw==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-arm-musleabihf": { + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-musleabihf/-/rollup-linux-arm-musleabihf-4.59.0.tgz", + "integrity": "sha512-CikFT7aYPA2ufMD086cVORBYGHffBo4K8MQ4uPS/ZnY54GKj36i196u8U+aDVT2LX4eSMbyHtyOh7D7Zvk2VvA==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + 
"node_modules/@rollup/rollup-linux-arm64-gnu": { + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-gnu/-/rollup-linux-arm64-gnu-4.59.0.tgz", + "integrity": "sha512-jYgUGk5aLd1nUb1CtQ8E+t5JhLc9x5WdBKew9ZgAXg7DBk0ZHErLHdXM24rfX+bKrFe+Xp5YuJo54I5HFjGDAA==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-arm64-musl": { + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-musl/-/rollup-linux-arm64-musl-4.59.0.tgz", + "integrity": "sha512-peZRVEdnFWZ5Bh2KeumKG9ty7aCXzzEsHShOZEFiCQlDEepP1dpUl/SrUNXNg13UmZl+gzVDPsiCwnV1uI0RUA==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-loong64-gnu": { + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-loong64-gnu/-/rollup-linux-loong64-gnu-4.59.0.tgz", + "integrity": "sha512-gbUSW/97f7+r4gHy3Jlup8zDG190AuodsWnNiXErp9mT90iCy9NKKU0Xwx5k8VlRAIV2uU9CsMnEFg/xXaOfXg==", + "cpu": [ + "loong64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-loong64-musl": { + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-loong64-musl/-/rollup-linux-loong64-musl-4.59.0.tgz", + "integrity": "sha512-yTRONe79E+o0FWFijasoTjtzG9EBedFXJMl888NBEDCDV9I2wGbFFfJQQe63OijbFCUZqxpHz1GzpbtSFikJ4Q==", + "cpu": [ + "loong64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-ppc64-gnu": { + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-ppc64-gnu/-/rollup-linux-ppc64-gnu-4.59.0.tgz", + "integrity": "sha512-sw1o3tfyk12k3OEpRddF68a1unZ5VCN7zoTNtSn2KndUE+ea3m3ROOKRCZxEpmT9nsGnogpFP9x6mnLTCaoLkA==", + "cpu": [ + "ppc64" + ], + "dev": true, + 
"license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-ppc64-musl": { + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-ppc64-musl/-/rollup-linux-ppc64-musl-4.59.0.tgz", + "integrity": "sha512-+2kLtQ4xT3AiIxkzFVFXfsmlZiG5FXYW7ZyIIvGA7Bdeuh9Z0aN4hVyXS/G1E9bTP/vqszNIN/pUKCk/BTHsKA==", + "cpu": [ + "ppc64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-riscv64-gnu": { + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-gnu/-/rollup-linux-riscv64-gnu-4.59.0.tgz", + "integrity": "sha512-NDYMpsXYJJaj+I7UdwIuHHNxXZ/b/N2hR15NyH3m2qAtb/hHPA4g4SuuvrdxetTdndfj9b1WOmy73kcPRoERUg==", + "cpu": [ + "riscv64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-riscv64-musl": { + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-musl/-/rollup-linux-riscv64-musl-4.59.0.tgz", + "integrity": "sha512-nLckB8WOqHIf1bhymk+oHxvM9D3tyPndZH8i8+35p/1YiVoVswPid2yLzgX7ZJP0KQvnkhM4H6QZ5m0LzbyIAg==", + "cpu": [ + "riscv64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-s390x-gnu": { + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-s390x-gnu/-/rollup-linux-s390x-gnu-4.59.0.tgz", + "integrity": "sha512-oF87Ie3uAIvORFBpwnCvUzdeYUqi2wY6jRFWJAy1qus/udHFYIkplYRW+wo+GRUP4sKzYdmE1Y3+rY5Gc4ZO+w==", + "cpu": [ + "s390x" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-x64-gnu": { + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-gnu/-/rollup-linux-x64-gnu-4.59.0.tgz", + "integrity": 
"sha512-3AHmtQq/ppNuUspKAlvA8HtLybkDflkMuLK4DPo77DfthRb71V84/c4MlWJXixZz4uruIH4uaa07IqoAkG64fg==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-x64-musl": { + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-musl/-/rollup-linux-x64-musl-4.59.0.tgz", + "integrity": "sha512-2UdiwS/9cTAx7qIUZB/fWtToJwvt0Vbo0zmnYt7ED35KPg13Q0ym1g442THLC7VyI6JfYTP4PiSOWyoMdV2/xg==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-openbsd-x64": { + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-openbsd-x64/-/rollup-openbsd-x64-4.59.0.tgz", + "integrity": "sha512-M3bLRAVk6GOwFlPTIxVBSYKUaqfLrn8l0psKinkCFxl4lQvOSz8ZrKDz2gxcBwHFpci0B6rttydI4IpS4IS/jQ==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "openbsd" + ] + }, + "node_modules/@rollup/rollup-openharmony-arm64": { + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-openharmony-arm64/-/rollup-openharmony-arm64-4.59.0.tgz", + "integrity": "sha512-tt9KBJqaqp5i5HUZzoafHZX8b5Q2Fe7UjYERADll83O4fGqJ49O1FsL6LpdzVFQcpwvnyd0i+K/VSwu/o/nWlA==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "openharmony" + ] + }, + "node_modules/@rollup/rollup-win32-arm64-msvc": { + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-arm64-msvc/-/rollup-win32-arm64-msvc-4.59.0.tgz", + "integrity": "sha512-V5B6mG7OrGTwnxaNUzZTDTjDS7F75PO1ae6MJYdiMu60sq0CqN5CVeVsbhPxalupvTX8gXVSU9gq+Rx1/hvu6A==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@rollup/rollup-win32-ia32-msvc": { + "version": "4.59.0", + "resolved": 
"https://registry.npmjs.org/@rollup/rollup-win32-ia32-msvc/-/rollup-win32-ia32-msvc-4.59.0.tgz", + "integrity": "sha512-UKFMHPuM9R0iBegwzKF4y0C4J9u8C6MEJgFuXTBerMk7EJ92GFVFYBfOZaSGLu6COf7FxpQNqhNS4c4icUPqxA==", + "cpu": [ + "ia32" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@rollup/rollup-win32-x64-gnu": { + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-gnu/-/rollup-win32-x64-gnu-4.59.0.tgz", + "integrity": "sha512-laBkYlSS1n2L8fSo1thDNGrCTQMmxjYY5G0WFWjFFYZkKPjsMBsgJfGf4TLxXrF6RyhI60L8TMOjBMvXiTcxeA==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@rollup/rollup-win32-x64-msvc": { + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-msvc/-/rollup-win32-x64-msvc-4.59.0.tgz", + "integrity": "sha512-2HRCml6OztYXyJXAvdDXPKcawukWY2GpR5/nxKp4iBgiO3wcoEGkAaqctIbZcNB6KlUQBIqt8VYkNSj2397EfA==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@types/chai": { + "version": "5.2.3", + "resolved": "https://registry.npmjs.org/@types/chai/-/chai-5.2.3.tgz", + "integrity": "sha512-Mw558oeA9fFbv65/y4mHtXDs9bPnFMZAL/jxdPFUpOHHIXX91mcgEHbS5Lahr+pwZFR8A7GQleRWeI6cGFC2UA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/deep-eql": "*", + "assertion-error": "^2.0.1" + } + }, + "node_modules/@types/deep-eql": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/@types/deep-eql/-/deep-eql-4.0.2.tgz", + "integrity": "sha512-c9h9dVVMigMPc4bwTvC5dxqtqJZwQPePsWjPlpSOnojbor6pGqdk541lfA7AqFQr5pB1BRdq0juY9db81BwyFw==", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/estree": { + "version": "1.0.8", + "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.8.tgz", + "integrity": 
"sha512-dWHzHa2WqEXI/O1E9OjrocMTKJl2mSrEolh1Iomrv6U+JuNwaHXsXx9bLu5gG7BUWFIN0skIQJQ/L1rIex4X6w==", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/node": { + "version": "22.19.15", + "resolved": "https://registry.npmjs.org/@types/node/-/node-22.19.15.tgz", + "integrity": "sha512-F0R/h2+dsy5wJAUe3tAU6oqa2qbWY5TpNfL/RGmo1y38hiyO1w3x2jPtt76wmuaJI4DQnOBu21cNXQ2STIUUWg==", + "dev": true, + "license": "MIT", + "dependencies": { + "undici-types": "~6.21.0" + } + }, + "node_modules/@vitest/expect": { + "version": "3.2.4", + "resolved": "https://registry.npmjs.org/@vitest/expect/-/expect-3.2.4.tgz", + "integrity": "sha512-Io0yyORnB6sikFlt8QW5K7slY4OjqNX9jmJQ02QDda8lyM6B5oNgVWoSoKPac8/kgnCUzuHQKrSLtu/uOqqrig==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/chai": "^5.2.2", + "@vitest/spy": "3.2.4", + "@vitest/utils": "3.2.4", + "chai": "^5.2.0", + "tinyrainbow": "^2.0.0" + }, + "funding": { + "url": "https://opencollective.com/vitest" + } + }, + "node_modules/@vitest/mocker": { + "version": "3.2.4", + "resolved": "https://registry.npmjs.org/@vitest/mocker/-/mocker-3.2.4.tgz", + "integrity": "sha512-46ryTE9RZO/rfDd7pEqFl7etuyzekzEhUbTW3BvmeO/BcCMEgq59BKhek3dXDWgAj4oMK6OZi+vRr1wPW6qjEQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@vitest/spy": "3.2.4", + "estree-walker": "^3.0.3", + "magic-string": "^0.30.17" + }, + "funding": { + "url": "https://opencollective.com/vitest" + }, + "peerDependencies": { + "msw": "^2.4.9", + "vite": "^5.0.0 || ^6.0.0 || ^7.0.0-0" + }, + "peerDependenciesMeta": { + "msw": { + "optional": true + }, + "vite": { + "optional": true + } + } + }, + "node_modules/@vitest/pretty-format": { + "version": "3.2.4", + "resolved": "https://registry.npmjs.org/@vitest/pretty-format/-/pretty-format-3.2.4.tgz", + "integrity": "sha512-IVNZik8IVRJRTr9fxlitMKeJeXFFFN0JaB9PHPGQ8NKQbGpfjlTx9zO4RefN8gp7eqjNy8nyK3NZmBzOPeIxtA==", + "dev": true, + "license": "MIT", + "dependencies": { + "tinyrainbow": "^2.0.0" + }, + 
"funding": { + "url": "https://opencollective.com/vitest" + } + }, + "node_modules/@vitest/runner": { + "version": "3.2.4", + "resolved": "https://registry.npmjs.org/@vitest/runner/-/runner-3.2.4.tgz", + "integrity": "sha512-oukfKT9Mk41LreEW09vt45f8wx7DordoWUZMYdY/cyAk7w5TWkTRCNZYF7sX7n2wB7jyGAl74OxgwhPgKaqDMQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@vitest/utils": "3.2.4", + "pathe": "^2.0.3", + "strip-literal": "^3.0.0" + }, + "funding": { + "url": "https://opencollective.com/vitest" + } + }, + "node_modules/@vitest/snapshot": { + "version": "3.2.4", + "resolved": "https://registry.npmjs.org/@vitest/snapshot/-/snapshot-3.2.4.tgz", + "integrity": "sha512-dEYtS7qQP2CjU27QBC5oUOxLE/v5eLkGqPE0ZKEIDGMs4vKWe7IjgLOeauHsR0D5YuuycGRO5oSRXnwnmA78fQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@vitest/pretty-format": "3.2.4", + "magic-string": "^0.30.17", + "pathe": "^2.0.3" + }, + "funding": { + "url": "https://opencollective.com/vitest" + } + }, + "node_modules/@vitest/spy": { + "version": "3.2.4", + "resolved": "https://registry.npmjs.org/@vitest/spy/-/spy-3.2.4.tgz", + "integrity": "sha512-vAfasCOe6AIK70iP5UD11Ac4siNUNJ9i/9PZ3NKx07sG6sUxeag1LWdNrMWeKKYBLlzuK+Gn65Yd5nyL6ds+nw==", + "dev": true, + "license": "MIT", + "dependencies": { + "tinyspy": "^4.0.3" + }, + "funding": { + "url": "https://opencollective.com/vitest" + } + }, + "node_modules/@vitest/utils": { + "version": "3.2.4", + "resolved": "https://registry.npmjs.org/@vitest/utils/-/utils-3.2.4.tgz", + "integrity": "sha512-fB2V0JFrQSMsCo9HiSq3Ezpdv4iYaXRG1Sx8edX3MwxfyNn83mKiGzOcH+Fkxt4MHxr3y42fQi1oeAInqgX2QA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@vitest/pretty-format": "3.2.4", + "loupe": "^3.1.4", + "tinyrainbow": "^2.0.0" + }, + "funding": { + "url": "https://opencollective.com/vitest" + } + }, + "node_modules/assertion-error": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/assertion-error/-/assertion-error-2.0.1.tgz", + 
"integrity": "sha512-Izi8RQcffqCeNVgFigKli1ssklIbpHnCYc6AknXGYoB6grJqyeby7jv12JUQgmTAnIDnbck1uxksT4dzN3PWBA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + } + }, + "node_modules/cac": { + "version": "6.7.14", + "resolved": "https://registry.npmjs.org/cac/-/cac-6.7.14.tgz", + "integrity": "sha512-b6Ilus+c3RrdDk+JhLKUAQfzzgLEPy6wcXqS7f/xe1EETvsDP6GORG7SFuOs6cID5YkqchW/LXZbX5bc8j7ZcQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/chai": { + "version": "5.3.3", + "resolved": "https://registry.npmjs.org/chai/-/chai-5.3.3.tgz", + "integrity": "sha512-4zNhdJD/iOjSH0A05ea+Ke6MU5mmpQcbQsSOkgdaUMJ9zTlDTD/GYlwohmIE2u0gaxHYiVHEn1Fw9mZ/ktJWgw==", + "dev": true, + "license": "MIT", + "dependencies": { + "assertion-error": "^2.0.1", + "check-error": "^2.1.1", + "deep-eql": "^5.0.1", + "loupe": "^3.1.0", + "pathval": "^2.0.0" + }, + "engines": { + "node": ">=18" + } + }, + "node_modules/check-error": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/check-error/-/check-error-2.1.3.tgz", + "integrity": "sha512-PAJdDJusoxnwm1VwW07VWwUN1sl7smmC3OKggvndJFadxxDRyFJBX/ggnu/KE4kQAB7a3Dp8f/YXC1FlUprWmA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 16" + } + }, + "node_modules/commander": { + "version": "10.0.1", + "resolved": "https://registry.npmjs.org/commander/-/commander-10.0.1.tgz", + "integrity": "sha512-y4Mg2tXshplEbSGzx7amzPwKKOCGuoSRP/CjEdwwk0FOGlUbq6lKuoyDZTNZkmxHdJtp54hdfY/JUrdL7Xfdug==", + "license": "MIT", + "engines": { + "node": ">=14" + } + }, + "node_modules/debug": { + "version": "4.4.3", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.3.tgz", + "integrity": "sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA==", + "dev": true, + "license": "MIT", + "dependencies": { + "ms": "^2.1.3" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, 
+ "node_modules/deep-eql": { + "version": "5.0.2", + "resolved": "https://registry.npmjs.org/deep-eql/-/deep-eql-5.0.2.tgz", + "integrity": "sha512-h5k/5U50IJJFpzfL6nO9jaaumfjO/f2NjK/oYB2Djzm4p9L+3T9qWpZqZ2hAbLPuuYq9wrU08WQyBTL5GbPk5Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/es-module-lexer": { + "version": "1.7.0", + "resolved": "https://registry.npmjs.org/es-module-lexer/-/es-module-lexer-1.7.0.tgz", + "integrity": "sha512-jEQoCwk8hyb2AZziIOLhDqpm5+2ww5uIE6lkO/6jcOCusfk6LhMHpXXfBLXTZ7Ydyt0j4VoUQv6uGNYbdW+kBA==", + "dev": true, + "license": "MIT" + }, + "node_modules/esbuild": { + "version": "0.27.4", + "resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.27.4.tgz", + "integrity": "sha512-Rq4vbHnYkK5fws5NF7MYTU68FPRE1ajX7heQ/8QXXWqNgqqJ/GkmmyxIzUnf2Sr/bakf8l54716CcMGHYhMrrQ==", + "dev": true, + "hasInstallScript": true, + "license": "MIT", + "bin": { + "esbuild": "bin/esbuild" + }, + "engines": { + "node": ">=18" + }, + "optionalDependencies": { + "@esbuild/aix-ppc64": "0.27.4", + "@esbuild/android-arm": "0.27.4", + "@esbuild/android-arm64": "0.27.4", + "@esbuild/android-x64": "0.27.4", + "@esbuild/darwin-arm64": "0.27.4", + "@esbuild/darwin-x64": "0.27.4", + "@esbuild/freebsd-arm64": "0.27.4", + "@esbuild/freebsd-x64": "0.27.4", + "@esbuild/linux-arm": "0.27.4", + "@esbuild/linux-arm64": "0.27.4", + "@esbuild/linux-ia32": "0.27.4", + "@esbuild/linux-loong64": "0.27.4", + "@esbuild/linux-mips64el": "0.27.4", + "@esbuild/linux-ppc64": "0.27.4", + "@esbuild/linux-riscv64": "0.27.4", + "@esbuild/linux-s390x": "0.27.4", + "@esbuild/linux-x64": "0.27.4", + "@esbuild/netbsd-arm64": "0.27.4", + "@esbuild/netbsd-x64": "0.27.4", + "@esbuild/openbsd-arm64": "0.27.4", + "@esbuild/openbsd-x64": "0.27.4", + "@esbuild/openharmony-arm64": "0.27.4", + "@esbuild/sunos-x64": "0.27.4", + "@esbuild/win32-arm64": "0.27.4", + "@esbuild/win32-ia32": "0.27.4", + "@esbuild/win32-x64": "0.27.4" + } + }, + 
"node_modules/estree-walker": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/estree-walker/-/estree-walker-3.0.3.tgz", + "integrity": "sha512-7RUKfXgSMMkzt6ZuXmqapOurLGPPfgj6l9uRZ7lRGolvk0y2yocc35LdcxKC5PQZdn2DMqioAQ2NoWcrTKmm6g==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/estree": "^1.0.0" + } + }, + "node_modules/expect-type": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/expect-type/-/expect-type-1.3.0.tgz", + "integrity": "sha512-knvyeauYhqjOYvQ66MznSMs83wmHrCycNEN6Ao+2AeYEfxUIkuiVxdEa1qlGEPK+We3n0THiDciYSsCcgW/DoA==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": ">=12.0.0" + } + }, + "node_modules/fdir": { + "version": "6.5.0", + "resolved": "https://registry.npmjs.org/fdir/-/fdir-6.5.0.tgz", + "integrity": "sha512-tIbYtZbucOs0BRGqPJkshJUYdL+SDH7dVM8gjy+ERp3WAUjLEFJE+02kanyHtwjWOnwrKYBiwAmM0p4kLJAnXg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12.0.0" + }, + "peerDependencies": { + "picomatch": "^3 || ^4" + }, + "peerDependenciesMeta": { + "picomatch": { + "optional": true + } + } + }, + "node_modules/fsevents": { + "version": "2.3.3", + "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz", + "integrity": "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==", + "dev": true, + "hasInstallScript": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": "^8.16.0 || ^10.6.0 || >=11.0.0" + } + }, + "node_modules/graphql": { + "version": "16.13.1", + "resolved": "https://registry.npmjs.org/graphql/-/graphql-16.13.1.tgz", + "integrity": "sha512-gGgrVCoDKlIZ8fIqXBBb0pPKqDgki0Z/FSKNiQzSGj2uEYHr1tq5wmBegGwJx6QB5S5cM0khSBpi/JFHMCvsmQ==", + "license": "MIT", + "engines": { + "node": "^12.22.0 || ^14.16.0 || ^16.0.0 || >=17.0.0" + } + }, + "node_modules/js-tokens": { + "version": "9.0.1", + "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-9.0.1.tgz", + 
"integrity": "sha512-mxa9E9ITFOt0ban3j6L5MpjwegGz6lBQmM1IJkWeBZGcMxto50+eWdjC/52xDbS2vy0k7vIMK0Fe2wfL9OQSpQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/liquidjs": { + "version": "10.25.0", + "resolved": "https://registry.npmjs.org/liquidjs/-/liquidjs-10.25.0.tgz", + "integrity": "sha512-XpO7AiGULTG4xcTlwkcTI5JreFG7b6esLCLp+aUSh7YuQErJZEoUXre9u9rbdb0057pfWG4l0VursvLd5Q/eAw==", + "license": "MIT", + "dependencies": { + "commander": "^10.0.0" + }, + "bin": { + "liquid": "bin/liquid.js", + "liquidjs": "bin/liquid.js" + }, + "engines": { + "node": ">=16" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/liquidjs" + } + }, + "node_modules/loupe": { + "version": "3.2.1", + "resolved": "https://registry.npmjs.org/loupe/-/loupe-3.2.1.tgz", + "integrity": "sha512-CdzqowRJCeLU72bHvWqwRBBlLcMEtIvGrlvef74kMnV2AolS9Y8xUv1I0U/MNAWMhBlKIoyuEgoJ0t/bbwHbLQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/magic-string": { + "version": "0.30.21", + "resolved": "https://registry.npmjs.org/magic-string/-/magic-string-0.30.21.tgz", + "integrity": "sha512-vd2F4YUyEXKGcLHoq+TEyCjxueSeHnFxyyjNp80yg0XV4vUhnDer/lvvlqM/arB5bXQN5K2/3oinyCRyx8T2CQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jridgewell/sourcemap-codec": "^1.5.5" + } + }, + "node_modules/ms": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", + "dev": true, + "license": "MIT" + }, + "node_modules/nanoid": { + "version": "3.3.11", + "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.11.tgz", + "integrity": "sha512-N8SpfPUnUp1bK+PMYW8qSWdl9U+wwNWI4QKxOYDy9JAro3WMX7p2OeVRF9v+347pnakNevPmiHhNmZ2HbFA76w==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "bin": { + "nanoid": "bin/nanoid.cjs" + }, + "engines": { + "node": "^10 
|| ^12 || ^13.7 || ^14 || >=15.0.1" + } + }, + "node_modules/pathe": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/pathe/-/pathe-2.0.3.tgz", + "integrity": "sha512-WUjGcAqP1gQacoQe+OBJsFA7Ld4DyXuUIjZ5cc75cLHvJ7dtNsTugphxIADwspS+AraAUePCKrSVtPLFj/F88w==", + "dev": true, + "license": "MIT" + }, + "node_modules/pathval": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/pathval/-/pathval-2.0.1.tgz", + "integrity": "sha512-//nshmD55c46FuFw26xV/xFAaB5HF9Xdap7HJBBnrKdAd6/GxDBaNA1870O79+9ueg61cZLSVc+OaFlfmObYVQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 14.16" + } + }, + "node_modules/picocolors": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.1.1.tgz", + "integrity": "sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA==", + "dev": true, + "license": "ISC" + }, + "node_modules/picomatch": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.3.tgz", + "integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/jonschlinkert" + } + }, + "node_modules/postcss": { + "version": "8.5.8", + "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.5.8.tgz", + "integrity": "sha512-OW/rX8O/jXnm82Ey1k44pObPtdblfiuWnrd8X7GJ7emImCOstunGbXUpp7HdBrFQX6rJzn3sPT397Wp5aCwCHg==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/postcss" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "nanoid": "^3.3.11", + "picocolors": "^1.1.1", + "source-map-js": "^1.2.1" + }, + "engines": { + "node": "^10 || ^12 || >=14" + } + }, + 
"node_modules/rollup": { + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/rollup/-/rollup-4.59.0.tgz", + "integrity": "sha512-2oMpl67a3zCH9H79LeMcbDhXW/UmWG/y2zuqnF2jQq5uq9TbM9TVyXvA4+t+ne2IIkBdrLpAaRQAvo7YI/Yyeg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/estree": "1.0.8" + }, + "bin": { + "rollup": "dist/bin/rollup" + }, + "engines": { + "node": ">=18.0.0", + "npm": ">=8.0.0" + }, + "optionalDependencies": { + "@rollup/rollup-android-arm-eabi": "4.59.0", + "@rollup/rollup-android-arm64": "4.59.0", + "@rollup/rollup-darwin-arm64": "4.59.0", + "@rollup/rollup-darwin-x64": "4.59.0", + "@rollup/rollup-freebsd-arm64": "4.59.0", + "@rollup/rollup-freebsd-x64": "4.59.0", + "@rollup/rollup-linux-arm-gnueabihf": "4.59.0", + "@rollup/rollup-linux-arm-musleabihf": "4.59.0", + "@rollup/rollup-linux-arm64-gnu": "4.59.0", + "@rollup/rollup-linux-arm64-musl": "4.59.0", + "@rollup/rollup-linux-loong64-gnu": "4.59.0", + "@rollup/rollup-linux-loong64-musl": "4.59.0", + "@rollup/rollup-linux-ppc64-gnu": "4.59.0", + "@rollup/rollup-linux-ppc64-musl": "4.59.0", + "@rollup/rollup-linux-riscv64-gnu": "4.59.0", + "@rollup/rollup-linux-riscv64-musl": "4.59.0", + "@rollup/rollup-linux-s390x-gnu": "4.59.0", + "@rollup/rollup-linux-x64-gnu": "4.59.0", + "@rollup/rollup-linux-x64-musl": "4.59.0", + "@rollup/rollup-openbsd-x64": "4.59.0", + "@rollup/rollup-openharmony-arm64": "4.59.0", + "@rollup/rollup-win32-arm64-msvc": "4.59.0", + "@rollup/rollup-win32-ia32-msvc": "4.59.0", + "@rollup/rollup-win32-x64-gnu": "4.59.0", + "@rollup/rollup-win32-x64-msvc": "4.59.0", + "fsevents": "~2.3.2" + } + }, + "node_modules/siginfo": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/siginfo/-/siginfo-2.0.0.tgz", + "integrity": "sha512-ybx0WO1/8bSBLEWXZvEd7gMW3Sn3JFlW3TvX1nREbDLRNQNaeNN8WK0meBwPdAaOI7TtRRRJn/Es1zhrrCHu7g==", + "dev": true, + "license": "ISC" + }, + "node_modules/source-map-js": { + "version": "1.2.1", + "resolved": 
"https://registry.npmjs.org/source-map-js/-/source-map-js-1.2.1.tgz", + "integrity": "sha512-UXWMKhLOwVKb728IUtQPXxfYU+usdybtUrK/8uGE8CQMvrhOpwvzDBwj0QhSL7MQc7vIsISBG8VQ8+IDQxpfQA==", + "dev": true, + "license": "BSD-3-Clause", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/stackback": { + "version": "0.0.2", + "resolved": "https://registry.npmjs.org/stackback/-/stackback-0.0.2.tgz", + "integrity": "sha512-1XMJE5fQo1jGH6Y/7ebnwPOBEkIEnT4QF32d5R1+VXdXveM0IBMJt8zfaxX1P3QhVwrYe+576+jkANtSS2mBbw==", + "dev": true, + "license": "MIT" + }, + "node_modules/std-env": { + "version": "3.10.0", + "resolved": "https://registry.npmjs.org/std-env/-/std-env-3.10.0.tgz", + "integrity": "sha512-5GS12FdOZNliM5mAOxFRg7Ir0pWz8MdpYm6AY6VPkGpbA7ZzmbzNcBJQ0GPvvyWgcY7QAhCgf9Uy89I03faLkg==", + "dev": true, + "license": "MIT" + }, + "node_modules/strip-literal": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/strip-literal/-/strip-literal-3.1.0.tgz", + "integrity": "sha512-8r3mkIM/2+PpjHoOtiAW8Rg3jJLHaV7xPwG+YRGrv6FP0wwk/toTpATxWYOW0BKdWwl82VT2tFYi5DlROa0Mxg==", + "dev": true, + "license": "MIT", + "dependencies": { + "js-tokens": "^9.0.1" + }, + "funding": { + "url": "https://github.com/sponsors/antfu" + } + }, + "node_modules/tinybench": { + "version": "2.9.0", + "resolved": "https://registry.npmjs.org/tinybench/-/tinybench-2.9.0.tgz", + "integrity": "sha512-0+DUvqWMValLmha6lr4kD8iAMK1HzV0/aKnCtWb9v9641TnP/MFb7Pc2bxoxQjTXAErryXVgUOfv2YqNllqGeg==", + "dev": true, + "license": "MIT" + }, + "node_modules/tinyexec": { + "version": "0.3.2", + "resolved": "https://registry.npmjs.org/tinyexec/-/tinyexec-0.3.2.tgz", + "integrity": "sha512-KQQR9yN7R5+OSwaK0XQoj22pwHoTlgYqmUscPYoknOoWCWfj/5/ABTMRi69FrKU5ffPVh5QcFikpWJI/P1ocHA==", + "dev": true, + "license": "MIT" + }, + "node_modules/tinyglobby": { + "version": "0.2.15", + "resolved": "https://registry.npmjs.org/tinyglobby/-/tinyglobby-0.2.15.tgz", + "integrity": 
"sha512-j2Zq4NyQYG5XMST4cbs02Ak8iJUdxRM0XI5QyxXuZOzKOINmWurp3smXu3y5wDcJrptwpSjgXHzIQxR0omXljQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "fdir": "^6.5.0", + "picomatch": "^4.0.3" + }, + "engines": { + "node": ">=12.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/SuperchupuDev" + } + }, + "node_modules/tinypool": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/tinypool/-/tinypool-1.1.1.tgz", + "integrity": "sha512-Zba82s87IFq9A9XmjiX5uZA/ARWDrB03OHlq+Vw1fSdt0I+4/Kutwy8BP4Y/y/aORMo61FQ0vIb5j44vSo5Pkg==", + "dev": true, + "license": "MIT", + "engines": { + "node": "^18.0.0 || >=20.0.0" + } + }, + "node_modules/tinyrainbow": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/tinyrainbow/-/tinyrainbow-2.0.0.tgz", + "integrity": "sha512-op4nsTR47R6p0vMUUoYl/a+ljLFVtlfaXkLQmqfLR1qHma1h/ysYk4hEXZ880bf2CYgTskvTa/e196Vd5dDQXw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=14.0.0" + } + }, + "node_modules/tinyspy": { + "version": "4.0.4", + "resolved": "https://registry.npmjs.org/tinyspy/-/tinyspy-4.0.4.tgz", + "integrity": "sha512-azl+t0z7pw/z958Gy9svOTuzqIk6xq+NSheJzn5MMWtWTFywIacg2wUlzKFGtt3cthx0r2SxMK0yzJOR0IES7Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=14.0.0" + } + }, + "node_modules/typescript": { + "version": "5.9.3", + "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.9.3.tgz", + "integrity": "sha512-jl1vZzPDinLr9eUt3J/t7V6FgNEw9QjvBPdysz9KfQDD41fQrC2Y4vKQdiaUpFT4bXlb1RHhLpp8wtm6M5TgSw==", + "dev": true, + "license": "Apache-2.0", + "bin": { + "tsc": "bin/tsc", + "tsserver": "bin/tsserver" + }, + "engines": { + "node": ">=14.17" + } + }, + "node_modules/undici-types": { + "version": "6.21.0", + "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-6.21.0.tgz", + "integrity": "sha512-iwDZqg0QAGrg9Rav5H4n0M64c3mkR59cJ6wQp+7C4nI0gsmExaedaYLNO44eT4AtBBwjbTiGPMlt2Md0T9H9JQ==", + "dev": true, + "license": "MIT" + }, + 
"node_modules/vite": { + "version": "7.3.1", + "resolved": "https://registry.npmjs.org/vite/-/vite-7.3.1.tgz", + "integrity": "sha512-w+N7Hifpc3gRjZ63vYBXA56dvvRlNWRczTdmCBBa+CotUzAPf5b7YMdMR/8CQoeYE5LX3W4wj6RYTgonm1b9DA==", + "dev": true, + "license": "MIT", + "dependencies": { + "esbuild": "^0.27.0", + "fdir": "^6.5.0", + "picomatch": "^4.0.3", + "postcss": "^8.5.6", + "rollup": "^4.43.0", + "tinyglobby": "^0.2.15" + }, + "bin": { + "vite": "bin/vite.js" + }, + "engines": { + "node": "^20.19.0 || >=22.12.0" + }, + "funding": { + "url": "https://github.com/vitejs/vite?sponsor=1" + }, + "optionalDependencies": { + "fsevents": "~2.3.3" + }, + "peerDependencies": { + "@types/node": "^20.19.0 || >=22.12.0", + "jiti": ">=1.21.0", + "less": "^4.0.0", + "lightningcss": "^1.21.0", + "sass": "^1.70.0", + "sass-embedded": "^1.70.0", + "stylus": ">=0.54.8", + "sugarss": "^5.0.0", + "terser": "^5.16.0", + "tsx": "^4.8.1", + "yaml": "^2.4.2" + }, + "peerDependenciesMeta": { + "@types/node": { + "optional": true + }, + "jiti": { + "optional": true + }, + "less": { + "optional": true + }, + "lightningcss": { + "optional": true + }, + "sass": { + "optional": true + }, + "sass-embedded": { + "optional": true + }, + "stylus": { + "optional": true + }, + "sugarss": { + "optional": true + }, + "terser": { + "optional": true + }, + "tsx": { + "optional": true + }, + "yaml": { + "optional": true + } + } + }, + "node_modules/vite-node": { + "version": "3.2.4", + "resolved": "https://registry.npmjs.org/vite-node/-/vite-node-3.2.4.tgz", + "integrity": "sha512-EbKSKh+bh1E1IFxeO0pg1n4dvoOTt0UDiXMd/qn++r98+jPO1xtJilvXldeuQ8giIB5IkpjCgMleHMNEsGH6pg==", + "dev": true, + "license": "MIT", + "dependencies": { + "cac": "^6.7.14", + "debug": "^4.4.1", + "es-module-lexer": "^1.7.0", + "pathe": "^2.0.3", + "vite": "^5.0.0 || ^6.0.0 || ^7.0.0-0" + }, + "bin": { + "vite-node": "vite-node.mjs" + }, + "engines": { + "node": "^18.0.0 || ^20.0.0 || >=22.0.0" + }, + "funding": { + "url": 
"https://opencollective.com/vitest" + } + }, + "node_modules/vitest": { + "version": "3.2.4", + "resolved": "https://registry.npmjs.org/vitest/-/vitest-3.2.4.tgz", + "integrity": "sha512-LUCP5ev3GURDysTWiP47wRRUpLKMOfPh+yKTx3kVIEiu5KOMeqzpnYNsKyOoVrULivR8tLcks4+lga33Whn90A==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/chai": "^5.2.2", + "@vitest/expect": "3.2.4", + "@vitest/mocker": "3.2.4", + "@vitest/pretty-format": "^3.2.4", + "@vitest/runner": "3.2.4", + "@vitest/snapshot": "3.2.4", + "@vitest/spy": "3.2.4", + "@vitest/utils": "3.2.4", + "chai": "^5.2.0", + "debug": "^4.4.1", + "expect-type": "^1.2.1", + "magic-string": "^0.30.17", + "pathe": "^2.0.3", + "picomatch": "^4.0.2", + "std-env": "^3.9.0", + "tinybench": "^2.9.0", + "tinyexec": "^0.3.2", + "tinyglobby": "^0.2.14", + "tinypool": "^1.1.1", + "tinyrainbow": "^2.0.0", + "vite": "^5.0.0 || ^6.0.0 || ^7.0.0-0", + "vite-node": "3.2.4", + "why-is-node-running": "^2.3.0" + }, + "bin": { + "vitest": "vitest.mjs" + }, + "engines": { + "node": "^18.0.0 || ^20.0.0 || >=22.0.0" + }, + "funding": { + "url": "https://opencollective.com/vitest" + }, + "peerDependencies": { + "@edge-runtime/vm": "*", + "@types/debug": "^4.1.12", + "@types/node": "^18.0.0 || ^20.0.0 || >=22.0.0", + "@vitest/browser": "3.2.4", + "@vitest/ui": "3.2.4", + "happy-dom": "*", + "jsdom": "*" + }, + "peerDependenciesMeta": { + "@edge-runtime/vm": { + "optional": true + }, + "@types/debug": { + "optional": true + }, + "@types/node": { + "optional": true + }, + "@vitest/browser": { + "optional": true + }, + "@vitest/ui": { + "optional": true + }, + "happy-dom": { + "optional": true + }, + "jsdom": { + "optional": true + } + } + }, + "node_modules/why-is-node-running": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/why-is-node-running/-/why-is-node-running-2.3.0.tgz", + "integrity": "sha512-hUrmaWBdVDcxvYqnyh09zunKzROWjbZTiNy8dBEjkS7ehEDQibXJ7XvlmtbwuTclUiIyN+CyXQD4Vmko8fNm8w==", + "dev": true, + 
"license": "MIT", + "dependencies": { + "siginfo": "^2.0.0", + "stackback": "0.0.2" + }, + "bin": { + "why-is-node-running": "cli.js" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/yaml": { + "version": "2.8.2", + "resolved": "https://registry.npmjs.org/yaml/-/yaml-2.8.2.tgz", + "integrity": "sha512-mplynKqc1C2hTVYxd0PU2xQAc22TI1vShAYGksCCfxbn/dFwnHTNi1bvYsBTkhdUNtGIf5xNOg938rrSSYvS9A==", + "license": "ISC", + "bin": { + "yaml": "bin.mjs" + }, + "engines": { + "node": ">= 14.6" + }, + "funding": { + "url": "https://github.com/sponsors/eemeli" + } + }, + "node_modules/zod": { + "version": "4.3.6", + "resolved": "https://registry.npmjs.org/zod/-/zod-4.3.6.tgz", + "integrity": "sha512-rftlrkhHZOcjDwkGlnUtZZkvaPHCsDATp4pGpuOOMDaTdDDXF91wuVDJoWoPsKX/3YPQ5fHuF3STjcYyKr+Qhg==", + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/colinhacks" + } + } + } +} diff --git a/package.json b/package.json index 6dcbf925..2d730f07 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "symphony-ts", - "version": "0.1.8", + "version": "2026.03.23.1", "license": "Apache-2.0", "type": "module", "description": "TypeScript implementation of Symphony", @@ -37,7 +37,7 @@ "build": "tsc -p tsconfig.build.json", "prepack": "pnpm build", "typecheck": "tsc -p tsconfig.json --noEmit", - "test": "vitest run", + "test": "node scripts/test.mjs", "test:watch": "vitest", "lint": "biome check .", "format": "biome format --write ." 
@@ -49,6 +49,14 @@ "vitest": "^3.0.8" }, "dependencies": { + "@ai-sdk/provider": "^3.0.8", + "@google/gemini-cli-core": "^0.33.2", + "@google/genai": "^1.45.0", + "@slack/bolt": "^4.6.0", + "@slack/web-api": "^7.15.0", + "ai": "^6.0.116", + "ai-sdk-provider-claude-code": "^3.4.4", + "ai-sdk-provider-gemini-cli": "^2.0.1", "graphql": "^16.13.1", "liquidjs": "^10.24.0", "yaml": "^2.8.2", diff --git a/pipeline-config/WORKFLOW-flat.md b/pipeline-config/WORKFLOW-flat.md new file mode 100644 index 00000000..1e748fa8 --- /dev/null +++ b/pipeline-config/WORKFLOW-flat.md @@ -0,0 +1,108 @@ +--- +tracker: + kind: linear + api_key: $LINEAR_API_KEY + project_slug: 1fa66498be91 + active_states: + - Todo + terminal_states: + - Done + - Cancelled + +polling: + interval_ms: 30000 + +workspace: + root: ./workspaces + +agent: + max_concurrent_agents: 1 + max_turns: 30 + max_retry_backoff_ms: 300000 + +runner: + kind: claude-code + model: claude-sonnet-4-5 + +hooks: + after_create: | + set -euo pipefail + if [ -z "${REPO_URL:-}" ]; then + echo "ERROR: REPO_URL environment variable is not set" >&2 + exit 1 + fi + echo "Cloning $REPO_URL into workspace..." + git clone --depth 1 "$REPO_URL" . + if [ -f package.json ]; then + if [ -f bun.lock ]; then + bun install --frozen-lockfile + elif [ -f pnpm-lock.yaml ]; then + pnpm install --frozen-lockfile + elif [ -f yarn.lock ]; then + yarn install --frozen-lockfile + else + npm install + fi + fi + echo "Workspace setup complete." + before_run: | + set -euo pipefail + echo "Syncing workspace with upstream main..." + git fetch origin main + if ! git rebase origin/main 2>/dev/null; then + echo "WARNING: Rebase failed, aborting rebase" >&2 + git rebase --abort + fi + echo "Workspace synced." + timeout_ms: 120000 + +server: + port: 4321 + +observability: + dashboard_enabled: true + refresh_ms: 5000 +--- + +You are running in headless/unattended mode. Do NOT use interactive skills, slash commands, or plan mode. Do not prompt for user input. 
Complete your work autonomously. + +Implement only what your task specifies. If you encounter missing functionality that another task covers, add a TODO comment rather than implementing it. Do not refactor surrounding code or add unsolicited improvements. + +Never hardcode localhost or 127.0.0.1. Use the $BASE_URL environment variable for all URL references. Set BASE_URL=localhost:<port> during local development. + +# Implementation: {{ issue.identifier }} — {{ issue.title }} + +You are implementing Linear issue {{ issue.identifier }}. + +## Issue Description + +{{ issue.description }} + +{% if issue.labels.size > 0 %} +Labels: {{ issue.labels | join: ", " }} +{% endif %} + +## Implementation Steps + +1. Read any investigation notes from previous comments on this issue. +2. Create a feature branch from the issue's suggested branch name{% if issue.branch_name %} (`{{ issue.branch_name }}`){% endif %}, or use `{{ issue.identifier | downcase }}/<short-slug>`. +3. Implement the task per the issue description. +4. Write tests as needed. +5. Run all `# Verify:` commands from the spec. You are not done until every verify command exits 0. +6. Commit your changes with message format: `feat({{ issue.identifier }}): <description>`. +7. Open a PR targeting this repo (not its upstream fork parent) via `gh pr create --repo $(git remote get-url origin | sed "s|.*github.com/||;s|\.git$||")` with the issue description in the PR body. +8. Link the PR to the Linear issue by including `{{ issue.identifier }}` in the PR title or body. + +## Scope Discipline + +- If your task requires a capability that doesn't exist in the codebase and isn't specified in the spec, stop and comment what's missing on the issue. Don't scaffold unspecced infrastructure. +- Tests must be runnable against $BASE_URL (no localhost assumptions in committed tests). + +## Documentation Maintenance + +- If you add a new module, API endpoint, or significant abstraction, update the relevant docs/ file and the AGENTS.md Documentation Map entry.
If no relevant doc exists, create one following the docs/ conventions (# Title, > Last updated header). +- If a docs/ file you reference during implementation is stale or missing, update/create it as part of your implementation. Include the update in the same PR as your code changes — never in a separate PR. +- If you make a non-obvious architectural decision during implementation, create a design doc in docs/design-docs/ following the ADR format (numbered, with Status line). Add it to the AGENTS.md design docs table. +- When you complete your implementation, update the > Last updated date on any docs/ file you modified. +- Do not update docs/generated/ files — those are auto-generated and will be overwritten. +- Commit doc updates in the same PR as code changes, not separately. diff --git a/pipeline-config/WORKFLOW-instrumentation.md b/pipeline-config/WORKFLOW-instrumentation.md new file mode 100644 index 00000000..81d5b2c0 --- /dev/null +++ b/pipeline-config/WORKFLOW-instrumentation.md @@ -0,0 +1,429 @@ +--- +tracker: + kind: linear + api_key: $LINEAR_API_KEY + project_slug: fdba14472043 + active_states: + - Todo + - In Progress + - In Review + - Blocked + - Resume + terminal_states: + - Done + - Cancelled + +escalation_state: Blocked + +polling: + interval_ms: 30000 + +workspace: + root: ./workspaces + +agent: + max_concurrent_agents: 1 + max_turns: 30 + max_retry_backoff_ms: 300000 + +codex: + stall_timeout_ms: 1800000 + +runner: + kind: claude-code + model: claude-sonnet-4-6 + +hooks: + after_create: | + set -euo pipefail + if [ -z "${REPO_URL:-}" ]; then + echo "ERROR: REPO_URL environment variable is not set" >&2 + exit 1 + fi + echo "Cloning $REPO_URL into workspace..." + git clone --depth 1 "$REPO_URL" . 
+ if [ -f package.json ]; then + if [ -f bun.lock ]; then + bun install --frozen-lockfile + elif [ -f pnpm-lock.yaml ]; then + pnpm install --frozen-lockfile + elif [ -f yarn.lock ]; then + yarn install --frozen-lockfile + else + npm install + fi + fi + # --- Build code graph (best-effort) --- + if command -v code-review-graph >/dev/null 2>&1; then + echo "Building code review graph..." + code-review-graph build --repo . || echo "WARNING: code-review-graph build failed, continuing without graph" >&2 + else + echo "WARNING: code-review-graph not installed, skipping graph build" >&2 + fi + echo "Workspace setup complete." + before_run: | + set -euo pipefail + echo "Syncing workspace with upstream..." + + # --- Git lock handling --- + wait_for_git_lock() { + local attempt=0 + while [ -f .git/index.lock ] && [ $attempt -lt 6 ]; do + echo "WARNING: .git/index.lock exists, waiting 5s (attempt $((attempt+1))/6)..." >&2 + sleep 5 + attempt=$((attempt+1)) + done + if [ -f .git/index.lock ]; then + echo "WARNING: .git/index.lock still exists after 30s, removing stale lock" >&2 + rm -f .git/index.lock + fi + } + + # --- Git fetch with retry --- + fetch_ok=false + for attempt in 1 2 3; do + wait_for_git_lock + if git fetch origin 2>/dev/null; then + fetch_ok=true + break + fi + echo "WARNING: git fetch failed (attempt $attempt/3), retrying in 2s..." >&2 + sleep 2 + done + if [ "$fetch_ok" = false ]; then + echo "WARNING: git fetch failed after 3 attempts, continuing with stale refs" >&2 + fi + + # --- Rebase (best-effort) --- + CURRENT_BRANCH=$(git rev-parse --abbrev-ref HEAD 2>/dev/null || echo "unknown") + if [ "$CURRENT_BRANCH" = "main" ] || [ "$CURRENT_BRANCH" = "master" ]; then + echo "On $CURRENT_BRANCH — rebasing onto latest..." + wait_for_git_lock + if ! 
git rebase "origin/$CURRENT_BRANCH" 2>/dev/null; then + echo "WARNING: Rebase failed, aborting rebase" >&2 + git rebase --abort 2>/dev/null || true + fi + else + echo "On feature branch $CURRENT_BRANCH — skipping rebase, fetch only." + fi + echo "Workspace synced." + before_remove: | + set -uo pipefail + BRANCH=$(git rev-parse --abbrev-ref HEAD 2>/dev/null || echo "") + if [ -z "$BRANCH" ] || [ "$BRANCH" = "main" ] || [ "$BRANCH" = "master" ] || [ "$BRANCH" = "HEAD" ]; then + exit 0 + fi + echo "Cleaning up branch $BRANCH..." + # Close any open PR for this branch (also deletes the remote branch via --delete-branch) + PR_NUM=$(gh pr list --head "$BRANCH" --state open --json number --jq '.[0].number' 2>/dev/null || echo "") + if [ -n "$PR_NUM" ]; then + echo "Closing PR #$PR_NUM and deleting remote branch..." + gh pr close "$PR_NUM" --delete-branch 2>/dev/null || true + else + # No open PR — just delete the remote branch if it exists + echo "No open PR found, deleting remote branch..." + git push origin --delete "$BRANCH" 2>/dev/null || true + fi + echo "Cleanup complete." 
+ timeout_ms: 120000 + +server: + port: 4321 + +observability: + dashboard_enabled: true + refresh_ms: 5000 + +stages: + initial_stage: investigate + + investigate: + type: agent + runner: claude-code + model: claude-sonnet-4-6 + max_turns: 8 + linear_state: In Progress + mcp_servers: + code-review-graph: + command: uvx + args: + - code-review-graph + - serve + on_complete: implement + + implement: + type: agent + runner: claude-code + model: claude-sonnet-4-6 + max_turns: 30 + mcp_servers: + code-review-graph: + command: uvx + args: + - code-review-graph + - serve + on_complete: review + + review: + type: agent + runner: claude-code + model: claude-opus-4-6 + max_turns: 15 + max_rework: 3 + linear_state: In Review + on_complete: merge + on_rework: implement + + merge: + type: agent + runner: claude-code + model: claude-sonnet-4-6 + max_turns: 5 + on_complete: done + + done: + type: terminal + linear_state: Done +--- + +You are running in headless/unattended mode. Do NOT use interactive skills, slash commands, or plan mode. Do not prompt for user input. Complete your work autonomously. + +Implement only what your task specifies. If you encounter missing functionality that another task covers, add a TODO comment rather than implementing it. Do not refactor surrounding code or add unsolicited improvements. + +Never hardcode localhost or 127.0.0.1. Use the $BASE_URL environment variable for all URL references. Set BASE_URL=localhost: during local development. + +# {{ issue.identifier }} — {{ issue.title }} + +You are working on Linear issue {{ issue.identifier }}. + +## Issue Description + +{{ issue.description }} + +{% if issue.labels.size > 0 %} +Labels: {{ issue.labels | join: ", " }} +{% endif %} + +{% if stageName == "investigate" %} +## Stage: Investigation +You are in the INVESTIGATE stage. Your job is to analyze the issue and create an implementation plan. + +{% if issue.state == "Resume" %} +## RESUME CONTEXT +This issue was previously blocked. 
Check the issue comments for a `## Resume Context` comment explaining what changed. Focus your investigation on the blocking reasons and what has been updated. +{% endif %} + +- Read the codebase to understand existing patterns and architecture +- Identify which files need to change and what the approach should be +- Post a comment on the Linear issue (via `gh`) with your investigation findings and proposed implementation plan +- Do NOT implement code, create branches, or open PRs in this stage — investigation only + +### Workpad (investigate) +After completing your investigation, create the workpad comment on this Linear issue. +**Preferred**: Write the workpad content to a local `workpad.md` file and call `sync_workpad` with `issue_id` and `file_path`. Save the returned `comment_id` for future updates. +**Fallback** (if `sync_workpad` is unavailable): +1. First, search for an existing workpad comment using `linear_graphql`: + ```graphql + query { issue(id: "{{ issue.id }}") { comments { nodes { id body } } } } + ``` + Look for a comment whose body starts with `## Workpad`. +2. If no workpad comment exists, create one using `commentCreate`. If one exists, update it using `commentUpdate`. +3. Use this template for the workpad body: + ``` + ## Workpad + **Environment**: :@ + + ### Plan + - [ ] Step 1 derived from issue description + - [ ] Step 2 ... + - [ ] Substep if needed + + ### Acceptance Criteria + - [ ] Criterion from issue requirements + - [ ] ... + + ### Validation + - `` + - `` + + ### Notes + - Investigation complete. Plan posted. + + ### Confusions + (Only add this section if something in the issue was genuinely unclear.) + ``` +4. Fill the Plan and Acceptance Criteria sections from your investigation findings. 
+ +### Required: Structured Map + +After your prose findings, you MUST include a structured map section in the workpad with the following format: + +``` +### Files to Change +- path/to/file.ts:LINE_START-LINE_END — what needs to change and why + +### Read Order +1. path/to/primary.ts (primary change target) +2. path/to/types.ts (type definitions needed) +3. path/to/related.test.ts (test file to update) + +### Key Dependencies +- FunctionX is called from A, B, C +- InterfaceY is used in D, E +``` + +This structured map helps the implementation agent navigate the codebase efficiently without re-reading files you already explored. + +## Completion Signals +When you are done: +- If investigation is complete and workpad is posted: output `[STAGE_COMPLETE]` +- If the spec is ambiguous or contradictory: output `[STAGE_FAILED: spec]` with an explanation +- If you hit infrastructure issues (API limits, network errors): output `[STAGE_FAILED: infra]` with details +{% endif %} + +{% if stageName == "implement" %} +## Stage: Implementation +You are in the IMPLEMENT stage. An investigation was done in the previous stage — check issue comments for the plan. + +{% if reworkCount > 0 %} +## REWORK ATTEMPT {{ reworkCount }} +This is a rework attempt. Read ALL comments on this Linear issue starting with `## Review Findings`. These contain the specific findings you must fix. +- Fix ONLY the identified findings +- Do not modify code outside the affected files unless strictly necessary +- Do not reinterpret the spec +- If a finding conflicts with the spec, output `[STAGE_FAILED: spec]` with an explanation +{% endif %} + +## Implementation Steps + +1. Read any investigation notes from previous comments on this issue. +2. Create a feature branch from the issue's suggested branch name{% if issue.branch_name %} (`{{ issue.branch_name }}`){% endif %}, or use `{{ issue.identifier | downcase }}/`. +3. Implement the task per the issue description. +4. Write tests as needed. +5. 
Run all `# Verify:` commands from the spec. You are not done until every verify command exits 0. +6. Before creating the PR, capture structured tool output: + - Run `npx tsc --noEmit 2>&1` and include output in PR body under `## Tool Output > TypeScript` + - Run `npm test 2>&1` and include summary in PR body under `## Tool Output > Tests` + - Run `semgrep scan --config auto --json 2>&1` (if available) and include raw output in PR body under `## SAST Output` + - Do NOT filter or interpret SAST results — include them verbatim. +7. Commit your changes with message format: `feat({{ issue.identifier }}): <description>`. +8. Open a PR targeting this repo (not its upstream fork parent) via `gh pr create --repo $(git remote get-url origin | sed "s|.*github.com/||;s|\.git$||")` with the issue description in the PR body. Include the Tool Output and SAST Output sections. +9. Link the PR to the Linear issue by including `{{ issue.identifier }}` in the PR title or body. + +### Workpad (implement) +Update the workpad comment at these milestones during implementation. +**Preferred**: Edit your local `workpad.md` file and call `sync_workpad` with `issue_id`, `file_path`, and `comment_id` (from the investigate stage). +**Fallback** (if `sync_workpad` is unavailable): +1. Search for the existing workpad comment (body starts with `## Workpad`) using `linear_graphql`: + ```graphql + query { issue(id: "{{ issue.id }}") { comments { nodes { id body } } } } + ``` +2. Update it using `commentUpdate` with the comment's `id`. +3. At each milestone, update the relevant sections: + - **After starting implementation**: Check off Plan items as you complete them. + - **After implementation is done**: Add a Notes entry (e.g., `- Implementation complete. PR #<number> opened.`), update Validation with actual commands run. + - **After all tests pass**: Check off Acceptance Criteria items, add a Notes entry confirming validation. +4. Do NOT update the workpad after every small code change — only at the milestones above.
+5. If no workpad comment exists (e.g., investigation stage was skipped), create one using the template from the investigate stage instructions. + +10. **If your changes are app-touching** (UI, API responses visible to users, frontend assets), capture a screenshot after validation passes and embed it in the workpad: + - Take a screenshot (e.g., `npx playwright screenshot` or `curl` the endpoint and save the response). + - Upload it using the fileUpload flow described in the **Media in Workpads** section. + - Add the image to the workpad comment under Notes: `![screenshot after validation](assetUrl)`. + - Skip this step for non-visual changes (library code, configs, internal refactors). + +## Completion Signals +When you are done: +- If all verify commands pass and PR is created: output `[STAGE_COMPLETE]` +- If you cannot resolve a verify failure after 3 attempts: output `[STAGE_FAILED: verify]` with the failing command and output +- If the spec is ambiguous or contradictory: output `[STAGE_FAILED: spec]` with an explanation +- If you hit infrastructure issues (API limits, network errors): output `[STAGE_FAILED: infra]` with details +{% endif %} + +{% if stageName == "review" %} +## Stage: Review +You are a review agent. Load and execute the /pipeline-review skill. + +The PR for this issue is on the current branch. The issue description contains the frozen spec. The PR body contains Tool Output and SAST Output sections from the implementation agent. + +If all findings are clean or only P3/theoretical: output `[STAGE_COMPLETE]` +If surviving P1/P2 findings exist: post them as a `## Review Findings` comment on the Linear issue, then output `[STAGE_FAILED: review]` with a one-line summary. +{% endif %} + +{% if stageName == "merge" %} +## Stage: Merge +You are in the MERGE stage. The PR has been reviewed and approved. 
+- Merge the PR via `gh pr merge --squash --delete-branch --repo $(git remote get-url origin | sed "s|.*github.com/||;s|\.git$||")` +- Verify the merge succeeded on the main branch +- Do NOT modify code in this stage + +### Workpad (merge) +After merging the PR, update the workpad comment one final time. +**Preferred**: Edit your local `workpad.md` file and call `sync_workpad` with `issue_id`, `file_path`, and `comment_id`. +**Fallback** (if `sync_workpad` is unavailable): +1. Search for the existing workpad comment (body starts with `## Workpad`) using `linear_graphql`: + ```graphql + query { issue(id: "{{ issue.id }}") { comments { nodes { id body } } } } + ``` +2. Update it using `commentUpdate`: + - Check off all remaining Plan and Acceptance Criteria items. + - Add a final Notes entry: `- PR merged. Issue complete.` + +- When you have successfully merged the PR, output the exact text `[STAGE_COMPLETE]` as the very last line of your final message. +{% endif %} + +## Scope Discipline + +- If your task requires a capability that doesn't exist in the codebase and isn't specified in the spec, stop and comment what's missing on the issue. Don't scaffold unspecced infrastructure. +- Tests must be runnable against $BASE_URL (no localhost assumptions in committed tests). + +## Workpad Rules + +You maintain a single persistent `## Workpad` comment on the Linear issue. This is your structured progress document. + +**Critical rules:** +- **Never create multiple workpad comments.** Always search for an existing comment with `## Workpad` in its body before creating a new one. +- **Update at milestones only** — plan finalized, implementation done, validation complete. Do NOT sync after every minor change. +- **Prefer `sync_workpad` over raw GraphQL.** Write your workpad content to a local `workpad.md` file, then call `sync_workpad` with `issue_id`, `file_path`, and optionally `comment_id` (returned from the first sync). 
This keeps the workpad body out of your conversation context and saves tokens. Fall back to `linear_graphql` only if `sync_workpad` is unavailable.
+- **`linear_graphql` fallback patterns** (use only if `sync_workpad` is unavailable):
+  - Search comments: `query { issue(id: "<issueId>") { comments { nodes { id body } } } }`
+  - Create comment: `mutation { commentCreate(input: { issueId: "<issueId>", body: "<markdown body>" }) { comment { id } } }`
+  - Update comment: `mutation { commentUpdate(id: "<commentId>", input: { body: "<markdown body>" }) { comment { id } } }`
+- **Never use `__type` or `__schema` introspection queries** against the Linear API. Use the exact patterns above.
+
+## Media in Workpads (fileUpload)
+
+When you capture evidence (screenshots, recordings, logs) during implementation, embed them in the workpad using Linear's `fileUpload` API. This is a 3-step flow:
+
+**Step 1: Get upload URL** via `linear_graphql`:
+```graphql
+mutation($filename: String!, $contentType: String!, $size: Int!) {
+  fileUpload(filename: $filename, contentType: $contentType, size: $size, makePublic: true) {
+    success
+    uploadFile { uploadUrl assetUrl headers { key value } }
+  }
+}
+```
+
+**Step 2: Upload file bytes** using `curl`:
+```bash
+# Build header flags from the returned headers array
+curl -X PUT -H "Content-Type: <contentType>" \
+  -H "<header.key>: <header.value>" -H "<header.key>: <header.value>" \
+  --data-binary @<filePath> "<uploadUrl>"
+```
+
+**Step 3: Embed in workpad** — add `![description](assetUrl)` to the workpad comment body (either via `sync_workpad` or `commentUpdate`).
+
+**Supported content types**: `image/png`, `image/jpeg`, `image/gif`, `video/mp4`, `application/pdf`.
+
+**When to capture media**: Only when evidence adds value — screenshots of UI changes, recordings of interaction flows, or error screenshots for debugging. Do not upload media for non-visual tasks (e.g., pure API or library changes).
+
+## Documentation Maintenance
+
+- If you add a new module, API endpoint, or significant abstraction, update the relevant docs/ file and the AGENTS.md Documentation Map entry.
If no relevant doc exists, create one following the docs/ conventions (# Title, > Last updated header). +- If a docs/ file you reference during implementation is stale or missing, update/create it as part of your implementation. Include the update in the same PR as your code changes — never in a separate PR. +- If you make a non-obvious architectural decision during implementation, create a design doc in docs/design-docs/ following the ADR format (numbered, with Status line). Add it to the AGENTS.md design docs table. +- When you complete your implementation, update the > Last updated date on any docs/ file you modified. +- Do not update docs/generated/ files — those are auto-generated and will be overwritten. +- Commit doc updates in the same PR as code changes, not separately. diff --git a/pipeline-config/WORKFLOW-staged.md b/pipeline-config/WORKFLOW-staged.md new file mode 100644 index 00000000..ca7d7f43 --- /dev/null +++ b/pipeline-config/WORKFLOW-staged.md @@ -0,0 +1,409 @@ +--- +tracker: + kind: linear + api_key: $LINEAR_API_KEY + project_slug: 1fa66498be91 + active_states: + - Todo + - In Progress + - In Review + - Blocked + - Resume + terminal_states: + - Done + - Cancelled + +escalation_state: Blocked + +polling: + interval_ms: 30000 + +workspace: + root: ./workspaces + +agent: + max_concurrent_agents: 1 + max_turns: 30 + max_retry_backoff_ms: 300000 + +codex: + stall_timeout_ms: 1800000 + +runner: + kind: claude-code + model: claude-sonnet-4-5 + +hooks: + after_create: | + set -euo pipefail + if [ -z "${REPO_URL:-}" ]; then + echo "ERROR: REPO_URL environment variable is not set" >&2 + exit 1 + fi + echo "Cloning $REPO_URL into workspace..." + git clone --depth 1 "$REPO_URL" . 
+ if [ -f package.json ]; then + if [ -f bun.lock ]; then + bun install --frozen-lockfile + elif [ -f pnpm-lock.yaml ]; then + pnpm install --frozen-lockfile + elif [ -f yarn.lock ]; then + yarn install --frozen-lockfile + else + npm install + fi + fi + # --- Build code graph (best-effort) --- + if command -v code-review-graph >/dev/null 2>&1; then + echo "Building code review graph..." + code-review-graph build --repo . || echo "WARNING: code-review-graph build failed, continuing without graph" >&2 + else + echo "WARNING: code-review-graph not installed, skipping graph build" >&2 + fi + echo "Workspace setup complete." + before_run: | + set -euo pipefail + echo "Syncing workspace with upstream..." + + # --- Git lock handling --- + wait_for_git_lock() { + local attempt=0 + while [ -f .git/index.lock ] && [ $attempt -lt 6 ]; do + echo "WARNING: .git/index.lock exists, waiting 5s (attempt $((attempt+1))/6)..." >&2 + sleep 5 + attempt=$((attempt+1)) + done + if [ -f .git/index.lock ]; then + echo "WARNING: .git/index.lock still exists after 30s, removing stale lock" >&2 + rm -f .git/index.lock + fi + } + + # --- Git fetch with retry --- + fetch_ok=false + for attempt in 1 2 3; do + wait_for_git_lock + if git fetch origin 2>/dev/null; then + fetch_ok=true + break + fi + echo "WARNING: git fetch failed (attempt $attempt/3), retrying in 2s..." >&2 + sleep 2 + done + if [ "$fetch_ok" = false ]; then + echo "WARNING: git fetch failed after 3 attempts, continuing with stale refs" >&2 + fi + + # --- Rebase (best-effort) --- + CURRENT_BRANCH=$(git rev-parse --abbrev-ref HEAD 2>/dev/null || echo "unknown") + if [ "$CURRENT_BRANCH" = "main" ] || [ "$CURRENT_BRANCH" = "master" ]; then + echo "On $CURRENT_BRANCH — rebasing onto latest..." + wait_for_git_lock + if ! 
git rebase "origin/$CURRENT_BRANCH" 2>/dev/null; then + echo "WARNING: Rebase failed, aborting rebase" >&2 + git rebase --abort 2>/dev/null || true + fi + else + echo "On feature branch $CURRENT_BRANCH — skipping rebase, fetch only." + fi + echo "Workspace synced." + before_remove: | + set -uo pipefail + BRANCH=$(git rev-parse --abbrev-ref HEAD 2>/dev/null || echo "") + if [ -z "$BRANCH" ] || [ "$BRANCH" = "main" ] || [ "$BRANCH" = "master" ] || [ "$BRANCH" = "HEAD" ]; then + exit 0 + fi + echo "Cleaning up branch $BRANCH..." + # Close any open PR for this branch (also deletes the remote branch via --delete-branch) + PR_NUM=$(gh pr list --head "$BRANCH" --state open --json number --jq '.[0].number' 2>/dev/null || echo "") + if [ -n "$PR_NUM" ]; then + echo "Closing PR #$PR_NUM and deleting remote branch..." + gh pr close "$PR_NUM" --delete-branch 2>/dev/null || true + else + # No open PR — just delete the remote branch if it exists + echo "No open PR found, deleting remote branch..." + git push origin --delete "$BRANCH" 2>/dev/null || true + fi + echo "Cleanup complete." 
+ timeout_ms: 120000 + +server: + port: 4321 + +observability: + dashboard_enabled: true + refresh_ms: 5000 + +stages: + initial_stage: investigate + + investigate: + type: agent + runner: claude-code + model: claude-sonnet-4-5 + max_turns: 8 + linear_state: In Progress + mcp_servers: + code-review-graph: + command: uvx + args: + - code-review-graph + - serve + on_complete: implement + + implement: + type: agent + runner: claude-code + model: claude-sonnet-4-5 + max_turns: 30 + mcp_servers: + code-review-graph: + command: uvx + args: + - code-review-graph + - serve + on_complete: review + + review: + type: agent + runner: claude-code + model: claude-opus-4-6 + max_turns: 15 + max_rework: 3 + linear_state: In Review + on_complete: merge + on_rework: implement + + merge: + type: agent + runner: claude-code + model: claude-sonnet-4-5 + max_turns: 5 + on_complete: done + + done: + type: terminal + linear_state: Done +--- + +You are running in headless/unattended mode. Do NOT use interactive skills, slash commands, or plan mode. Do not prompt for user input. Complete your work autonomously. + +Implement only what your task specifies. If you encounter missing functionality that another task covers, add a TODO comment rather than implementing it. Do not refactor surrounding code or add unsolicited improvements. + +Never hardcode localhost or 127.0.0.1. Use the $BASE_URL environment variable for all URL references. Set BASE_URL=localhost: during local development. + +# {{ issue.identifier }} — {{ issue.title }} + +You are working on Linear issue {{ issue.identifier }}. + +## Issue Description + +{{ issue.description }} + +{% if issue.labels.size > 0 %} +Labels: {{ issue.labels | join: ", " }} +{% endif %} + +{% if stageName == "investigate" %} +## Stage: Investigation +You are in the INVESTIGATE stage. Your job is to analyze the issue and create an implementation plan. + +{% if issue.state == "Resume" %} +## RESUME CONTEXT +This issue was previously blocked. 
Check the issue comments for a `## Resume Context` comment explaining what changed. Focus your investigation on the blocking reasons and what has been updated.
+{% endif %}
+
+- Read the codebase to understand existing patterns and architecture
+- Identify which files need to change and what the approach should be
+- Post a comment on the Linear issue (via `gh`) with your investigation findings and proposed implementation plan
+- Do NOT implement code, create branches, or open PRs in this stage — investigation only
+
+### Workpad (investigate)
+After completing your investigation, create the workpad comment on this Linear issue.
+**Preferred**: Write the workpad content to a local `workpad.md` file and call `sync_workpad` with `issue_id` and `file_path`. Save the returned `comment_id` for future updates.
+**Fallback** (if `sync_workpad` is unavailable):
+1. First, search for an existing workpad comment using `linear_graphql`:
+   ```graphql
+   query { issue(id: "{{ issue.id }}") { comments { nodes { id body } } } }
+   ```
+   Look for a comment whose body starts with `## Workpad`.
+2. If no workpad comment exists, create one using `commentCreate`. If one exists, update it using `commentUpdate`.
+3. Use this template for the workpad body:
+   ```
+   ## Workpad
+   **Environment**: <branch>:<base-commit>@<workspace-path>
+
+   ### Plan
+   - [ ] Step 1 derived from issue description
+   - [ ] Step 2 ...
+     - [ ] Substep if needed
+
+   ### Acceptance Criteria
+   - [ ] Criterion from issue requirements
+   - [ ] ...
+
+   ### Validation
+   - `<verify command 1>`
+   - `<verify command 2>`
+
+   ### Notes
+   - Investigation complete. Plan posted.
+
+   ### Confusions
+   (Only add this section if something in the issue was genuinely unclear.)
+   ```
+4. Fill the Plan and Acceptance Criteria sections from your investigation findings.
+ +## Completion Signals +When you are done: +- If investigation is complete and workpad is posted: output `[STAGE_COMPLETE]` +- If the spec is ambiguous or contradictory: output `[STAGE_FAILED: spec]` with an explanation +- If you hit infrastructure issues (API limits, network errors): output `[STAGE_FAILED: infra]` with details +{% endif %} + +{% if stageName == "implement" %} +## Stage: Implementation +You are in the IMPLEMENT stage. An investigation was done in the previous stage — check issue comments for the plan. + +{% if reworkCount > 0 %} +## REWORK ATTEMPT {{ reworkCount }} +This is a rework attempt. Read ALL comments on this Linear issue starting with `## Review Findings`. These contain the specific findings you must fix. +- Fix ONLY the identified findings +- Do not modify code outside the affected files unless strictly necessary +- Do not reinterpret the spec +- If a finding conflicts with the spec, output `[STAGE_FAILED: spec]` with an explanation +{% endif %} + +## Implementation Steps + +1. Read any investigation notes from previous comments on this issue. +2. Create a feature branch from the issue's suggested branch name{% if issue.branch_name %} (`{{ issue.branch_name }}`){% endif %}, or use `{{ issue.identifier | downcase }}/`. +3. Implement the task per the issue description. +4. Write tests as needed. +5. Run all `# Verify:` commands from the spec. You are not done until every verify command exits 0. +6. Before creating the PR, capture structured tool output: + - Run `npx tsc --noEmit 2>&1` and include output in PR body under `## Tool Output > TypeScript` + - Run `npm test 2>&1` and include summary in PR body under `## Tool Output > Tests` + - Run `semgrep scan --config auto --json 2>&1` (if available) and include raw output in PR body under `## SAST Output` + - Do NOT filter or interpret SAST results — include them verbatim. +7. Commit your changes with message format: `feat({{ issue.identifier }}): `. +8. 
Open a PR targeting this repo (not its upstream fork parent) via `gh pr create --repo $(git remote get-url origin | sed "s|.*github.com/||;s|\.git$||")` with the issue description in the PR body. Include the Tool Output and SAST Output sections. +9. Link the PR to the Linear issue by including `{{ issue.identifier }}` in the PR title or body. + +### Workpad (implement) +Update the workpad comment at these milestones during implementation. +**Preferred**: Edit your local `workpad.md` file and call `sync_workpad` with `issue_id`, `file_path`, and `comment_id` (from the investigate stage). +**Fallback** (if `sync_workpad` is unavailable): +1. Search for the existing workpad comment (body starts with `## Workpad`) using `linear_graphql`: + ```graphql + query { issue(id: "{{ issue.id }}") { comments { nodes { id body } } } } + ``` +2. Update it using `commentUpdate` with the comment's `id`. +3. At each milestone, update the relevant sections: + - **After starting implementation**: Check off Plan items as you complete them. + - **After implementation is done**: Add a Notes entry (e.g., `- Implementation complete. PR # opened.`), update Validation with actual commands run. + - **After all tests pass**: Check off Acceptance Criteria items, add a Notes entry confirming validation. +4. Do NOT update the workpad after every small code change — only at the milestones above. +5. If no workpad comment exists (e.g., investigation stage was skipped), create one using the template from the investigate stage instructions. + +10. **If your changes are app-touching** (UI, API responses visible to users, frontend assets), capture a screenshot after validation passes and embed it in the workpad: + - Take a screenshot (e.g., `npx playwright screenshot` or `curl` the endpoint and save the response). + - Upload it using the fileUpload flow described in the **Media in Workpads** section. + - Add the image to the workpad comment under Notes: `![screenshot after validation](assetUrl)`. 
+ - Skip this step for non-visual changes (library code, configs, internal refactors). + +## Completion Signals +When you are done: +- If all verify commands pass and PR is created: output `[STAGE_COMPLETE]` +- If you cannot resolve a verify failure after 3 attempts: output `[STAGE_FAILED: verify]` with the failing command and output +- If the spec is ambiguous or contradictory: output `[STAGE_FAILED: spec]` with an explanation +- If you hit infrastructure issues (API limits, network errors): output `[STAGE_FAILED: infra]` with details +{% endif %} + +{% if stageName == "review" %} +## Stage: Review +You are a review agent. Load and execute the /pipeline-review skill. + +The PR for this issue is on the current branch. The issue description contains the frozen spec. The PR body contains Tool Output and SAST Output sections from the implementation agent. + +If all findings are clean or only P3/theoretical: output `[STAGE_COMPLETE]` +If surviving P1/P2 findings exist: post them as a `## Review Findings` comment on the Linear issue, then output `[STAGE_FAILED: review]` with a one-line summary. +{% endif %} + +{% if stageName == "merge" %} +## Stage: Merge +You are in the MERGE stage. The PR has been reviewed and approved. +- Merge the PR via `gh pr merge --squash --delete-branch --repo $(git remote get-url origin | sed "s|.*github.com/||;s|\.git$||")` +- Verify the merge succeeded on the main branch +- Do NOT modify code in this stage + +### Workpad (merge) +After merging the PR, update the workpad comment one final time. +**Preferred**: Edit your local `workpad.md` file and call `sync_workpad` with `issue_id`, `file_path`, and `comment_id`. +**Fallback** (if `sync_workpad` is unavailable): +1. Search for the existing workpad comment (body starts with `## Workpad`) using `linear_graphql`: + ```graphql + query { issue(id: "{{ issue.id }}") { comments { nodes { id body } } } } + ``` +2. 
Update it using `commentUpdate`:
+   - Check off all remaining Plan and Acceptance Criteria items.
+   - Add a final Notes entry: `- PR merged. Issue complete.`
+
+- When you have successfully merged the PR, output the exact text `[STAGE_COMPLETE]` as the very last line of your final message.
+{% endif %}
+
+## Scope Discipline
+
+- If your task requires a capability that doesn't exist in the codebase and isn't specified in the spec, stop and comment what's missing on the issue. Don't scaffold unspecced infrastructure.
+- Tests must be runnable against $BASE_URL (no localhost assumptions in committed tests).
+
+## Workpad Rules
+
+You maintain a single persistent `## Workpad` comment on the Linear issue. This is your structured progress document.
+
+**Critical rules:**
+- **Never create multiple workpad comments.** Always search for an existing comment with `## Workpad` in its body before creating a new one.
+- **Update at milestones only** — plan finalized, implementation done, validation complete. Do NOT sync after every minor change.
+- **Prefer `sync_workpad` over raw GraphQL.** Write your workpad content to a local `workpad.md` file, then call `sync_workpad` with `issue_id`, `file_path`, and optionally `comment_id` (returned from the first sync). This keeps the workpad body out of your conversation context and saves tokens. Fall back to `linear_graphql` only if `sync_workpad` is unavailable.
+- **`linear_graphql` fallback patterns** (use only if `sync_workpad` is unavailable):
+  - Search comments: `query { issue(id: "<issueId>") { comments { nodes { id body } } } }`
+  - Create comment: `mutation { commentCreate(input: { issueId: "<issueId>", body: "<markdown body>" }) { comment { id } } }`
+  - Update comment: `mutation { commentUpdate(id: "<commentId>", input: { body: "<markdown body>" }) { comment { id } } }`
+- **Never use `__type` or `__schema` introspection queries** against the Linear API. Use the exact patterns above.
+
+## Media in Workpads (fileUpload)
+
+When you capture evidence (screenshots, recordings, logs) during implementation, embed them in the workpad using Linear's `fileUpload` API. This is a 3-step flow:
+
+**Step 1: Get upload URL** via `linear_graphql`:
+```graphql
+mutation($filename: String!, $contentType: String!, $size: Int!) {
+  fileUpload(filename: $filename, contentType: $contentType, size: $size, makePublic: true) {
+    success
+    uploadFile { uploadUrl assetUrl headers { key value } }
+  }
+}
+```
+
+**Step 2: Upload file bytes** using `curl`:
+```bash
+# Build header flags from the returned headers array
+curl -X PUT -H "Content-Type: <contentType>" \
+  -H "<header.key>: <header.value>" -H "<header.key>: <header.value>" \
+  --data-binary @<filePath> "<uploadUrl>"
+```
+
+**Step 3: Embed in workpad** — add `![description](assetUrl)` to the workpad comment body (either via `sync_workpad` or `commentUpdate`).
+
+**Supported content types**: `image/png`, `image/jpeg`, `image/gif`, `video/mp4`, `application/pdf`.
+
+**When to capture media**: Only when evidence adds value — screenshots of UI changes, recordings of interaction flows, or error screenshots for debugging. Do not upload media for non-visual tasks (e.g., pure API or library changes).
+
+## Documentation Maintenance
+
+- If you add a new module, API endpoint, or significant abstraction, update the relevant docs/ file and the AGENTS.md Documentation Map entry. If no relevant doc exists, create one following the docs/ conventions (# Title, > Last updated header).
+- If a docs/ file you reference during implementation is stale or missing, update/create it as part of your implementation. Include the update in the same PR as your code changes — never in a separate PR.
+- If you make a non-obvious architectural decision during implementation, create a design doc in docs/design-docs/ following the ADR format (numbered, with Status line). Add it to the AGENTS.md design docs table.
+- When you complete your implementation, update the > Last updated date on any docs/ file you modified.
+- Do not update docs/generated/ files — those are auto-generated and will be overwritten. +- Commit doc updates in the same PR as code changes, not separately. diff --git a/pipeline-config/WORKFLOW.md b/pipeline-config/WORKFLOW.md new file mode 100644 index 00000000..5ecee134 --- /dev/null +++ b/pipeline-config/WORKFLOW.md @@ -0,0 +1,100 @@ +--- +tracker: + kind: linear + api_key: $LINEAR_API_KEY + project_slug: $LINEAR_PROJECT_SLUG + active_states: + - Todo + - In Progress + - In Review + - Rework + terminal_states: + - Done + - Cancelled + +polling: + interval_ms: 30000 + +workspace: + root: ./workspaces + +agent: + max_concurrent_agents: 3 + max_turns: 30 + max_retry_backoff_ms: 300000 + max_concurrent_agents_by_state: + in progress: 3 + in review: 2 + +runner: + kind: claude-code + model: claude-sonnet-4-5 + +hooks: + after_create: ./hooks/after-create.sh + before_run: ./hooks/before-run.sh + timeout_ms: 120000 + +server: + port: 4321 + +observability: + dashboard_enabled: true + refresh_ms: 5000 + +stages: + initial_stage: investigate + + investigate: + type: agent + runner: claude-code + model: claude-opus-4 + max_turns: 8 + prompt: prompts/investigate.liquid + on_complete: implement + + implement: + type: agent + runner: claude-code + model: claude-sonnet-4-5 + max_turns: 30 + prompt: prompts/implement.liquid + on_complete: review + + review: + type: gate + gate_type: ensemble + max_rework: 3 + reviewers: + - runner: codex + model: gpt-5.3-codex + role: adversarial-reviewer + prompt: prompts/review-adversarial.liquid + - runner: gemini + model: gemini-3-pro + role: security-reviewer + prompt: prompts/review-security.liquid + on_approve: merge + on_rework: implement + + merge: + type: agent + runner: claude-code + model: claude-sonnet-4-5 + max_turns: 5 + prompt: prompts/merge.liquid + on_complete: done + + done: + type: terminal +--- + +{% render 'prompts/global.liquid' %} + +You are working on Linear issue {{ issue.identifier }}: {{ issue.title }}. 
+
+{{ issue.description }}
+
+{% if issue.labels.size > 0 %}
+Labels: {{ issue.labels | join: ", " }}
+{% endif %}
diff --git a/pipeline-config/hooks/after-create.sh b/pipeline-config/hooks/after-create.sh
new file mode 100755
index 00000000..5c89db35
--- /dev/null
+++ b/pipeline-config/hooks/after-create.sh
@@ -0,0 +1,33 @@
+#!/usr/bin/env bash
+set -euo pipefail
+
+# after-create hook: Set up a fresh workspace for an agent.
+# Called by symphony-ts after creating the workspace directory.
+# Expects REPO_URL to be set in the environment.
+
+if [ -z "${REPO_URL:-}" ]; then
+  echo "ERROR: REPO_URL environment variable is not set" >&2
+  exit 1
+fi
+
+echo "Cloning $REPO_URL into workspace..."
+git clone --depth 1 "$REPO_URL" .
+
+# Install dependencies based on what's present
+if [ -f package.json ]; then
+  echo "Installing Node.js dependencies..."
+  if [ -f pnpm-lock.yaml ]; then
+    pnpm install --frozen-lockfile
+  elif [ -f yarn.lock ]; then
+    yarn install --frozen-lockfile
+  else
+    npm install
+  fi
+fi
+
+if [ -f requirements.txt ]; then
+  echo "Installing Python dependencies..."
+  pip install -r requirements.txt
+fi
+
+echo "Workspace setup complete."
diff --git a/pipeline-config/hooks/before-run.sh b/pipeline-config/hooks/before-run.sh
new file mode 100755
index 00000000..5b5690f0
--- /dev/null
+++ b/pipeline-config/hooks/before-run.sh
@@ -0,0 +1,16 @@
+#!/usr/bin/env bash
+set -euo pipefail
+
+# before-run hook: Sync workspace with upstream before each agent run.
+# Ensures the agent starts from the latest main branch state.
+
+echo "Syncing workspace with upstream main..."
+git fetch origin main
+
+# Attempt rebase; abort if conflicts arise (agent starts from current state)
+if ! git rebase origin/main 2>/dev/null; then
+  echo "WARNING: Rebase failed due to conflicts, aborting rebase" >&2
+  git rebase --abort 2>/dev/null || true
+fi
+
+echo "Workspace synced."
diff --git a/pipeline-config/ports.json b/pipeline-config/ports.json new file mode 100644 index 00000000..6401286c --- /dev/null +++ b/pipeline-config/ports.json @@ -0,0 +1,10 @@ +{ + "symphony": 4321, + "jony-agent": 4322, + "hs-data": 4323, + "hs-ui": 4324, + "hs-mobile": 4325, + "stickerlabs": 4326, + "household": 4327, + "toys": 4328 +} diff --git a/pipeline-config/prompts/global.liquid b/pipeline-config/prompts/global.liquid new file mode 100644 index 00000000..dca26509 --- /dev/null +++ b/pipeline-config/prompts/global.liquid @@ -0,0 +1,9 @@ +You are running in headless/unattended mode. Do NOT use interactive skills, slash commands, or plan mode. Do not prompt for user input. Complete your work autonomously. + +Implement only what your task specifies. If you encounter missing functionality that another task covers, add a TODO comment rather than implementing it. Do not refactor surrounding code or add unsolicited improvements. + +If a design reference is provided in the issue, read it via Paper/Pencil MCP tools to get exact values (spacing, colors, typography, layout). Do not approximate from memory or screenshots — query the design directly. + +Never hardcode localhost or 127.0.0.1. Use the $BASE_URL environment variable for all URL references. Set BASE_URL=localhost: during local development. + +If a decision is marked as "Explicitly Deferred" in the spec, do not raise it or try to resolve it. The founder has deliberately punted this. diff --git a/pipeline-config/prompts/implement.liquid b/pipeline-config/prompts/implement.liquid new file mode 100644 index 00000000..7118cf13 --- /dev/null +++ b/pipeline-config/prompts/implement.liquid @@ -0,0 +1,46 @@ +You are running in headless/unattended mode. Do NOT use interactive skills, slash commands, or plan mode. Do not prompt for user input. Complete your work autonomously. + +Implement only what your task specifies. 
If you encounter missing functionality that another task covers, add a TODO comment rather than implementing it. Do not refactor surrounding code or add unsolicited improvements. + +If a design reference is provided in the issue, read it via Paper/Pencil MCP tools to get exact values (spacing, colors, typography, layout). Do not approximate from memory or screenshots — query the design directly. + +Never hardcode localhost or 127.0.0.1. Use the $BASE_URL environment variable for all URL references. Set BASE_URL=localhost: during local development. + +If a decision is marked as "Explicitly Deferred" in the spec, do not raise it or try to resolve it. The founder has deliberately punted this. + +--- + +# Implementation: {{ issue.identifier }} — {{ issue.title }} + +You are implementing Linear issue {{ issue.identifier }}. + +## Issue Description + +{{ issue.description }} + +{% if issue.labels.size > 0 %} +Labels: {{ issue.labels | join: ", " }} +{% endif %} + +## Implementation Steps + +1. Read any investigation notes from previous comments on this issue. +2. Create a feature branch from the issue's suggested branch name{% if issue.branch_name %} (`{{ issue.branch_name }}`){% endif %}, or use `{{ issue.identifier | downcase }}/`. +3. Implement the task per the issue description and investigation notes. +4. Write tests as needed — for UI scenarios, write Playwright test files; for API scenarios, verify commands run directly. +5. Run all `# Verify:` commands from the spec. You are not done until every verify command exits 0. +6. Commit your changes with message format: `feat({{ issue.identifier }}): `. +7. Open a PR targeting this repo (not its upstream fork parent) via `gh pr create --repo $(git remote get-url origin | sed "s|.*github.com/||;s|\.git$||")` with the issue description in the PR body. +8. Link the PR to the Linear issue by including `{{ issue.identifier }}` in the PR title or body. 
+ +## Verify Line Rules + +- Run all `# Verify:` commands found in the issue description or linked spec scenarios. +- Every verify command must exit 0 before you are done. +- If a verify command appears to contradict the implementation or seems wrong, flag the specific verify line in the PR description, explain the contradiction, and move the issue to "blocked" immediately. Do not attempt to make incorrect verify lines pass by changing the implementation. + +## Scope Discipline + +- If your task requires a capability that doesn't exist in the codebase and isn't specified in the spec, stop and comment what's missing on the issue. Don't scaffold unspecced infrastructure. +- Tests must be runnable against $BASE_URL (no localhost assumptions in committed tests). +- E2E test file names should map to spec capability names where applicable. diff --git a/pipeline-config/prompts/investigate.liquid b/pipeline-config/prompts/investigate.liquid new file mode 100644 index 00000000..b673bba3 --- /dev/null +++ b/pipeline-config/prompts/investigate.liquid @@ -0,0 +1,43 @@ +You are running in headless/unattended mode. Do NOT use interactive skills, slash commands, or plan mode. Do not prompt for user input. Complete your work autonomously. + +Implement only what your task specifies. If you encounter missing functionality that another task covers, add a TODO comment rather than implementing it. Do not refactor surrounding code or add unsolicited improvements. + +If a design reference is provided in the issue, read it via Paper/Pencil MCP tools to get exact values (spacing, colors, typography, layout). Do not approximate from memory or screenshots — query the design directly. + +Never hardcode localhost or 127.0.0.1. Use the $BASE_URL environment variable for all URL references. Set BASE_URL=localhost: during local development. + +If a decision is marked as "Explicitly Deferred" in the spec, do not raise it or try to resolve it. The founder has deliberately punted this. 
+ +--- + +# Investigation: {{ issue.identifier }} — {{ issue.title }} + +You are investigating Linear issue {{ issue.identifier }}. + +## Issue Description + +{{ issue.description }} + +{% if issue.labels.size > 0 %} +Labels: {{ issue.labels | join: ", " }} +{% endif %} + +## Your Task + +This is the **investigation stage only**. Do NOT implement anything. + +1. Read the issue description carefully and understand what is being asked. +2. Explore the codebase to identify all relevant files, modules, and dependencies. +3. Identify potential risks, edge cases, or ambiguities in the task. +4. Create a brief implementation plan as a comment on the Linear issue via the `linear_graphql` tool. The plan should include: + - Files that will be created or modified + - Key implementation approach + - Any dependencies or blockers identified + - Estimated complexity (small / medium / large) +5. If the issue references a design document, read it via Paper/Pencil MCP tools and note the key design values in your plan. + +Do NOT: +- Write any implementation code +- Create branches or PRs +- Modify any source files +- Run tests (there's nothing to test yet) diff --git a/pipeline-config/prompts/merge.liquid b/pipeline-config/prompts/merge.liquid new file mode 100644 index 00000000..269e433a --- /dev/null +++ b/pipeline-config/prompts/merge.liquid @@ -0,0 +1,26 @@ +You are running in headless/unattended mode. Do NOT use interactive skills, slash commands, or plan mode. Do not prompt for user input. Complete your work autonomously. + +--- + +# Merge: {{ issue.identifier }} — {{ issue.title }} + +You are merging the PR for Linear issue {{ issue.identifier }}. + +{% if issue.url %}PR URL: {{ issue.url }}{% endif %} + +## Merge Steps + +1. Verify CI is green on the PR. Check via `gh pr checks` or `gh pr view --json statusCheckRollup`. +2. If CI is not green, do NOT merge. Report the failure and stop. +3. 
Squash merge the PR via: + ``` + gh pr merge --squash --delete-branch --repo $(git remote get-url origin | sed "s|.*github.com/||;s|\.git$||") + ``` +4. Update the Linear issue state to "Done" using the `linear_graphql` tool. +5. Verify the merge was successful by checking the PR status. + +## Important + +- Do NOT force merge if checks are failing. +- Do NOT merge if the PR has unresolved review comments. +- If the merge fails due to conflicts, move the issue back to "In Progress" state and stop. diff --git a/pipeline-config/prompts/review-adversarial.liquid b/pipeline-config/prompts/review-adversarial.liquid new file mode 100644 index 00000000..27d85c84 --- /dev/null +++ b/pipeline-config/prompts/review-adversarial.liquid @@ -0,0 +1,33 @@ +You are a strict adversarial code reviewer. Your job is to find problems, not to praise. + +## Issue Under Review + +- Identifier: {{ issue.identifier }} +- Title: {{ issue.title }} +- Description: {{ issue.description }} +{% if issue.url %}- PR URL: {{ issue.url }}{% endif %} + +## Review Criteria + +Review the PR diff critically. Look for: + +1. **Scope creep**: Does the PR include changes beyond what the issue specifies? Flag any unsolicited improvements, refactoring, or features not in the issue description. +2. **Missing edge cases**: Are error paths handled? What happens with empty inputs, null values, concurrent access, network failures? +3. **Security issues**: Injection vulnerabilities, XSS, auth bypasses, hardcoded secrets, unsafe deserialization. +4. **Breaking changes**: Could this PR break existing functionality? Are there backwards-incompatible API changes? +5. **Test coverage**: Are the changes adequately tested? Do verify lines pass? Are there scenarios that should be tested but aren't? +6. **Code quality**: Obvious bugs, logic errors, race conditions, resource leaks. 
+ +## Output Format + +You MUST respond with exactly two sections: + +**First**: A single JSON line containing your verdict: +``` +{"role": "adversarial-reviewer", "model": "", "verdict": "pass"} +``` +Set verdict to "pass" ONLY if the implementation is correct, properly scoped, and has no significant issues. Set to "fail" for any material concern. + +**Second**: Plain text feedback explaining your assessment. Be specific — reference file names, line numbers, and concrete issues. If failing, explain exactly what needs to change. + +Be strict. Only pass if the implementation is correct and properly scoped. diff --git a/pipeline-config/prompts/review-security.liquid b/pipeline-config/prompts/review-security.liquid new file mode 100644 index 00000000..f5f52888 --- /dev/null +++ b/pipeline-config/prompts/review-security.liquid @@ -0,0 +1,45 @@ +You are a security-focused code reviewer. Your sole concern is identifying vulnerabilities and security risks. + +## Issue Under Review + +- Identifier: {{ issue.identifier }} +- Title: {{ issue.title }} +- Description: {{ issue.description }} +{% if issue.url %}- PR URL: {{ issue.url }}{% endif %} + +## Security Review Checklist + +Review the PR diff for the following OWASP Top 10 and common security issues: + +1. **Injection** (SQL, NoSQL, OS command, LDAP): Are user inputs properly sanitized before use in queries or commands? +2. **Broken Authentication**: Are auth tokens handled securely? Session management issues? Credential exposure? +3. **Sensitive Data Exposure**: Are secrets, API keys, or PII logged or exposed? Are responses over-sharing data? +4. **XML External Entities (XXE)**: Are XML parsers configured securely? +5. **Broken Access Control**: Can users access resources they shouldn't? Are authorization checks present and correct? +6. **Security Misconfiguration**: Insecure defaults, overly permissive CORS, debug mode in production? +7. **Cross-Site Scripting (XSS)**: Is user input properly escaped in HTML/JS output? 
Are Content Security Policy headers set? +8. **Insecure Deserialization**: Is untrusted data deserialized without validation? +9. **Using Components with Known Vulnerabilities**: Are dependencies up to date? Any known CVEs? +10. **Insufficient Logging & Monitoring**: Are security-relevant events logged? Can attacks be detected? + +Also check: +- **Hardcoded secrets or credentials** in source code +- **Path traversal** vulnerabilities in file operations +- **SSRF** risks in URL handling +- **Race conditions** in security-critical operations + +## Output Format + +You MUST respond with exactly two sections: + +**First**: A single JSON line containing your verdict: +``` +{"role": "security-reviewer", "model": "", "verdict": "pass"} +``` +Set verdict to "pass" ONLY if no security issues were found. Set to "fail" for any security concern, no matter how minor. + +**Second**: Plain text feedback. For each finding, include: +- Severity (critical / high / medium / low) +- File and line number +- Description of the vulnerability +- Suggested remediation diff --git a/pipeline-config/templates/CLAUDE.md.template b/pipeline-config/templates/CLAUDE.md.template new file mode 100644 index 00000000..c1d22597 --- /dev/null +++ b/pipeline-config/templates/CLAUDE.md.template @@ -0,0 +1,66 @@ +# + +Replace with the actual product name (e.g., "Jony Design System", "Healthspanners Mobile App"). + +## Project Overview + +One paragraph: what the product does, who it's for, and why it exists. Keep it concrete — an agent reading this should understand the product's purpose in 30 seconds. + +## Architecture + +Describe the key directories, data flow, and patterns. Include a directory tree of the important paths. 
Example: + +``` +src/ +├── api/ # REST endpoints +├── components/ # React components +├── lib/ # Shared utilities +└── types/ # TypeScript interfaces +``` + +Mention the primary data flow (e.g., "React frontend → Hono API → SQLite" or "YAML source files → build script → generated output"). Call out any non-obvious architectural decisions. + +## Build & Run + +Exact commands to build, run, and develop. No ambiguity — copy-paste ready. + +```bash +# Install dependencies +npm install + +# Development server (port from D40 port table) +# jony=3000, hs-data=3001, hs-ui=3002, stickerlabs=3003, household=3004, pipeline-test-1=3005 +npm run dev # http://localhost: + +# Build +npm run build + +# Type check +npx tsc --noEmit +``` + +## Conventions + +Language, framework, and style conventions that agents must follow. Cover: + +- **Language/runtime**: e.g., TypeScript strict mode, Node 20, ESNext target +- **Imports**: e.g., `import type { ... }` for types, `.js` extensions for NodeNext +- **Naming**: e.g., kebab-case files, PascalCase components, camelCase functions +- **Patterns**: e.g., barrel exports, Zod at I/O boundaries only, no enums + +## Testing + +- **Framework**: e.g., Vitest, Jest, Playwright +- **Run tests**: `npm test` +- **Pattern**: e.g., co-located `*.test.ts` files or `tests/` directory +- **Coverage**: state expectations (e.g., "all new code must have tests", "critical paths only") + +## Pipeline Notes + +What Symphony pipeline agents need to know that isn't obvious from the code. + +- **Auto-generated files**: list any files that should never be edited directly (e.g., `output/`, `dist/`, `generated/`) +- **Fragile areas**: modules or patterns where agents commonly break things (e.g., "migration files are order-sensitive", "don't modify the auth middleware without updating the test fixtures") +- **Required env vars**: list environment variables the app needs (e.g., `DATABASE_URL`, `BASE_URL`). 
Note: never commit secrets — reference `.env.example` if one exists +- **Verify commands**: key commands that must pass before a PR is valid (e.g., `npm test`, `npm run build`, `npx tsc --noEmit`) +- **Scope boundaries**: things agents should NOT do (e.g., "don't modify shared components without coordinating", "don't add dependencies without flagging in PR") diff --git a/pipeline-config/templates/CLAUDE.md.tmpl b/pipeline-config/templates/CLAUDE.md.tmpl new file mode 100644 index 00000000..02775183 --- /dev/null +++ b/pipeline-config/templates/CLAUDE.md.tmpl @@ -0,0 +1,60 @@ +# {{PROJECT_NAME}} + +## Project Overview + +{{PROJECT_NAME}} — replace this with a one-paragraph description of what the product does, who it's for, and why it exists. An agent reading this should understand the product's purpose in 30 seconds. + +## Architecture + +Describe the key directories, data flow, and patterns. Include a directory tree of the important paths: + +``` +src/ +├── api/ # REST endpoints +├── components/ # UI components +├── lib/ # Shared utilities +└── types/ # TypeScript interfaces +``` + +## Build & Run + +```bash +# Install dependencies +npm install + +# Development server +npm run dev + +# Build +npm run build + +# Type check +npx tsc --noEmit +``` + +## Conventions + +- **Language/runtime**: TypeScript strict mode, Node 22 +- **Imports**: `import type { ... 
}` for types +- **Naming**: kebab-case files, PascalCase types, camelCase functions +- **Patterns**: Document project-specific conventions here + +## Testing + +- **Framework**: Vitest (or your project's test framework) +- **Run tests**: `npm test` +- **Pattern**: co-located `*.test.ts` files or `tests/` directory +- **Coverage**: all new code must have tests + +## Pipeline Notes + +- **Repository**: {{REPO_URL}} +- **Auto-generated files**: `dist/`, `node_modules/` +- **Required env vars**: document environment variables the app needs (never commit secrets — reference `.env.example`) +- **Verify commands**: key commands that must pass before a PR is valid: + ```bash + npm test + npm run build + npx tsc --noEmit + ``` +- **Scope boundaries**: document things agents should NOT do diff --git a/pipeline-config/templates/WORKFLOW-template.md b/pipeline-config/templates/WORKFLOW-template.md new file mode 100644 index 00000000..369a8025 --- /dev/null +++ b/pipeline-config/templates/WORKFLOW-template.md @@ -0,0 +1,599 @@ +--- +tracker: + kind: linear + api_key: $LINEAR_API_KEY + # CUSTOMIZE: Set to the Linear project's slugId for this product. + # Find it via: linear_graphql query { projects { nodes { id name slugId } } } + project_slug: + active_states: + - Todo + - In Progress + - In Review + - Blocked + - Resume + terminal_states: + - Done + - Cancelled + +escalation_state: Blocked + +polling: + interval_ms: 30000 + +workspace: + root: ./workspaces + +agent: + max_concurrent_agents: 1 + max_turns: 30 + max_retry_backoff_ms: 300000 + +codex: + stall_timeout_ms: 1800000 + +runner: + kind: claude-code + model: claude-sonnet-4-5 + +hooks: + after_create: | + set -euo pipefail + if [ -z "${REPO_URL:-}" ]; then + echo "ERROR: REPO_URL environment variable is not set" >&2 + exit 1 + fi + + # --- Derive bare clone path (absolute, shared across workers) --- + REPO_SLUG=$(basename "${REPO_URL%.git}") + BARE_CLONE_DIR="$(cd .. 
 && pwd)/.bare-clones"
+  BARE_CLONE="$BARE_CLONE_DIR/$REPO_SLUG"
+  WORKSPACE_DIR="$PWD"
+  ISSUE_KEY=$(basename "$WORKSPACE_DIR")
+  BRANCH_NAME="worktree/$ISSUE_KEY"
+
+  # --- Create bare clone if it doesn't exist (race-safe) ---
+  mkdir -p "$BARE_CLONE_DIR"
+  if [ ! -d "$BARE_CLONE" ]; then
+    echo "Creating shared bare clone for $REPO_SLUG..."
+    if ! git clone --bare "$REPO_URL" "$BARE_CLONE" 2>/dev/null; then
+      # Another worker may have created it concurrently — verify it exists
+      if [ ! -d "$BARE_CLONE" ]; then
+        echo "ERROR: Failed to create bare clone at $BARE_CLONE" >&2
+        exit 1
+      fi
+      echo "Bare clone already created by another worker."
+    fi
+  else
+    echo "Using existing bare clone at $BARE_CLONE"
+  fi
+
+  # --- Fetch latest refs into bare clone ---
+  # NOTE: `git clone --bare` configures no fetch refspec, so a plain
+  # `git fetch origin` would only update FETCH_HEAD and leave refs/heads/*
+  # (including main) permanently stale. Fetch heads explicitly so new
+  # worktrees branch from the latest main.
+  git -C "$BARE_CLONE" fetch origin "+refs/heads/*:refs/heads/*" 2>/dev/null || echo "WARNING: fetch failed, using cached refs" >&2
+
+  # --- Create worktree for this issue ---
+  echo "Creating worktree for $ISSUE_KEY on branch $BRANCH_NAME..."
+  git -C "$BARE_CLONE" worktree add "$WORKSPACE_DIR" -b "$BRANCH_NAME" main
+
+  # --- Install dependencies ---
+  if [ -f package.json ]; then
+    if [ -f bun.lock ]; then
+      bun install --frozen-lockfile
+    elif [ -f pnpm-lock.yaml ]; then
+      pnpm install --frozen-lockfile
+    elif [ -f yarn.lock ]; then
+      yarn install --frozen-lockfile
+    else
+      npm install
+    fi
+  fi
+  # --- Build code graph (best-effort) ---
+  if command -v code-review-graph >/dev/null 2>&1; then
+    echo "Building code review graph..."
+    code-review-graph build --repo . || echo "WARNING: code-review-graph build failed, continuing without graph" >&2
+  else
+    echo "WARNING: code-review-graph not installed, skipping graph build" >&2
+  fi
+  echo "Workspace setup complete (worktree: $BRANCH_NAME)."
+  before_run: |
+    set -euo pipefail
+    echo "Syncing workspace with upstream..."
+ + # --- Resolve git dir (worktree .git is a file, not a directory) --- + resolve_git_dir() { + if [ -f .git ]; then + # Worktree: .git is a file containing "gitdir: /path/to/.bare-clones/repo/worktrees/..." + sed 's/^gitdir: //' .git + elif [ -d .git ]; then + echo ".git" + else + echo "" + fi + } + GIT_DIR=$(resolve_git_dir) + + # --- Git lock handling (works for both worktrees and regular clones) --- + wait_for_git_lock() { + if [ -z "$GIT_DIR" ]; then return; fi + local lock_file="$GIT_DIR/index.lock" + local attempt=0 + while [ -f "$lock_file" ] && [ $attempt -lt 6 ]; do + echo "WARNING: $lock_file exists, waiting 5s (attempt $((attempt+1))/6)..." >&2 + sleep 5 + attempt=$((attempt+1)) + done + if [ -f "$lock_file" ]; then + echo "WARNING: $lock_file still exists after 30s, removing stale lock" >&2 + rm -f "$lock_file" + fi + } + + # --- Git fetch with retry --- + fetch_ok=false + for attempt in 1 2 3; do + wait_for_git_lock + if git fetch origin 2>/dev/null; then + fetch_ok=true + break + fi + echo "WARNING: git fetch failed (attempt $attempt/3), retrying in 2s..." >&2 + sleep 2 + done + if [ "$fetch_ok" = false ]; then + echo "WARNING: git fetch failed after 3 attempts, continuing with stale refs" >&2 + fi + + # --- Rebase (best-effort) --- + CURRENT_BRANCH=$(git rev-parse --abbrev-ref HEAD 2>/dev/null || echo "unknown") + if [ "$CURRENT_BRANCH" = "main" ] || [ "$CURRENT_BRANCH" = "master" ]; then + echo "On $CURRENT_BRANCH — rebasing onto latest..." + wait_for_git_lock + # In bare clone worktrees, refs are stored as refs/heads/, not refs/remotes/origin/ + # Try origin/ first (regular clone), fall back to (bare clone worktree) + if git show-ref --verify --quiet "refs/remotes/origin/$CURRENT_BRANCH"; then + REBASE_TARGET="origin/$CURRENT_BRANCH" + else + REBASE_TARGET="$CURRENT_BRANCH" + fi + if ! 
git rebase "$REBASE_TARGET" 2>/dev/null; then + echo "WARNING: Rebase failed, aborting rebase" >&2 + git rebase --abort 2>/dev/null || true + fi + else + echo "On feature branch $CURRENT_BRANCH — skipping rebase, fetch only." + fi + # Import investigation brief into CLAUDE.md if it exists + if [ -f "INVESTIGATION-BRIEF.md" ]; then + if ! grep -q "@INVESTIGATION-BRIEF.md" CLAUDE.md 2>/dev/null; then + echo '' >> CLAUDE.md + echo '@INVESTIGATION-BRIEF.md' >> CLAUDE.md + fi + fi + # Import rebase brief into CLAUDE.md if it exists + if [ -f "REBASE-BRIEF.md" ]; then + if ! grep -q "@REBASE-BRIEF.md" CLAUDE.md 2>/dev/null; then + echo '' >> CLAUDE.md + echo '@REBASE-BRIEF.md' >> CLAUDE.md + fi + fi + echo "Workspace synced." + before_remove: | + set -uo pipefail + + # --- Handle case where worktree was never fully set up --- + if [ ! -e .git ]; then + echo "No git repo in workspace, nothing to clean up." + exit 0 + fi + + BRANCH=$(git rev-parse --abbrev-ref HEAD 2>/dev/null || echo "") + if [ -z "$BRANCH" ] || [ "$BRANCH" = "main" ] || [ "$BRANCH" = "master" ] || [ "$BRANCH" = "HEAD" ]; then + exit 0 + fi + + echo "Cleaning up branch $BRANCH..." + + # --- Close any open PR for this branch --- + PR_NUM=$(gh pr list --head "$BRANCH" --state open --json number --jq '.[0].number' 2>/dev/null || echo "") + if [ -n "$PR_NUM" ]; then + echo "Closing PR #$PR_NUM and deleting remote branch..." + gh pr close "$PR_NUM" --delete-branch 2>/dev/null || true + else + echo "No open PR found, deleting remote branch..." + git push origin --delete "$BRANCH" 2>/dev/null || true + fi + + # --- Remove worktree entry from bare clone --- + REPO_SLUG=$(basename "${REPO_URL%.git}") + BARE_CLONE="$(cd .. && pwd)/.bare-clones/$REPO_SLUG" + if [ -d "$BARE_CLONE" ]; then + echo "Removing worktree entry from bare clone..." + git -C "$BARE_CLONE" worktree remove "$PWD" --force 2>/dev/null || true + git -C "$BARE_CLONE" branch -D "$BRANCH" 2>/dev/null || true + fi + echo "Cleanup complete." 
+ timeout_ms: 120000 + +server: + port: 4321 + +observability: + dashboard_enabled: true + refresh_ms: 5000 + +stages: + initial_stage: investigate + + # Fast-track: issues with this label skip the investigate stage and start at the target stage. + # Remove or comment out this block if you do not need fast-track routing. + # fast_track: + # label: trivial + # initial_stage: implement + + investigate: + type: agent + runner: claude-code + model: claude-opus-4-6 + max_turns: 8 + linear_state: In Progress + mcp_servers: + code-review-graph: + command: uvx + args: + - code-review-graph + - serve + on_complete: implement + + implement: + type: agent + runner: claude-code + model: claude-opus-4-6 + max_turns: 30 + mcp_servers: + code-review-graph: + command: uvx + args: + - code-review-graph + - serve + on_complete: review + + review: + type: agent + runner: claude-code + model: claude-opus-4-6 + max_turns: 15 + max_rework: 3 + linear_state: In Review + on_complete: merge + on_rework: implement + + merge: + type: agent + runner: claude-code + model: claude-sonnet-4-5 + max_turns: 5 + on_complete: done + on_rework: implement + max_rework: 2 + + done: + type: terminal + linear_state: Done +--- + +You are running in headless/unattended mode. Do NOT use interactive skills, slash commands, or plan mode. Do not prompt for user input. Complete your work autonomously. + +Implement only what your task specifies. If you encounter missing functionality that another task covers, add a TODO comment rather than implementing it. Do not refactor surrounding code or add unsolicited improvements. + +Never hardcode localhost or 127.0.0.1. Use the $BASE_URL environment variable for all URL references. Set BASE_URL=localhost: during local development. + +# {{ issue.identifier }} — {{ issue.title }} + + +You are working on Linear issue {{ issue.identifier }}. 
+
+## Issue Description
+
+{{ issue.description }}
+
+{% if issue.labels.size > 0 %}
+Labels: {{ issue.labels | join: ", " }}
+{% endif %}
+
+{% if stageName == "investigate" %}
+## Stage: Investigation
+You are in the INVESTIGATE stage. Your job is to analyze the issue and create an implementation plan.
+
+{% if issue.state == "Resume" %}
+## RESUME CONTEXT
+This issue was previously blocked. Check the issue comments for a `## Resume Context` comment explaining what changed. Focus your investigation on the blocking reasons and what has been updated.
+{% endif %}
+
+- Read the codebase to understand existing patterns and architecture
+- Identify which files need to change and what the approach should be
+- Post a comment on the Linear issue (via the `linear_graphql` tool) with your investigation findings and proposed implementation plan
+- Do NOT implement code, create branches, or open PRs in this stage — investigation only
+
+### Workpad (investigate)
+After completing your investigation, create the workpad comment on this Linear issue.
+**Preferred**: Write the workpad content to a local `workpad.md` file and call `sync_workpad` with `issue_id` and `file_path`. Save the returned `comment_id` for future updates.
+**Fallback** (if `sync_workpad` is unavailable):
+1. First, search for an existing workpad comment using `linear_graphql`:
+   ```graphql
+   query { issue(id: "{{ issue.id }}") { comments { nodes { id body } } } }
+   ```
+   Look for a comment whose body starts with `## Workpad`.
+2. If no workpad comment exists, create one using `commentCreate`. If one exists, update it using `commentUpdate`.
+3. Use this template for the workpad body:
+   ```
+   ## Workpad
+   **Environment**: <host>:<port>@<branch>
+
+   ### Plan
+   - [ ] Step 1 derived from issue description
+   - [ ] Step 2 ...
+     - [ ] Substep if needed
+
+   ### Acceptance Criteria
+   - [ ] Criterion from issue requirements
+   - [ ] ...
+
+   ### Validation
+   - `<verify command>`
+   - `<verify command>`
+
+   ### Notes
+   - Investigation complete. Plan posted.
+ + ### Confusions + (Only add this section if something in the issue was genuinely unclear.) + ``` +4. Fill the Plan and Acceptance Criteria sections from your investigation findings. + +## Investigation Brief + +After posting the workpad, write `INVESTIGATION-BRIEF.md` to the worktree root. This file gives the implement-stage agent a concise orientation without re-reading the codebase. + +Keep the brief under ~200 lines (~4K tokens). Use exactly this structure: + +```markdown +# Investigation Brief +## Issue: [ISSUE-KEY] — [Title] + +## Objective +One-paragraph summary of what needs to be done and why. + +## Relevant Files (ranked by importance) +1. `src/path/to/primary-file.ts` — Main file to modify. [What it does, why it matters] +2. `src/path/to/secondary-file.ts` — Related dependency. [What to know] +3. `tests/path/to/test-file.test.ts` — Existing tests. [Coverage notes] + +## Key Code Patterns +- Pattern X is used for Y (see `file.ts:42-67`) +- The codebase uses Z convention for this type of change + +## Architecture Context +- Brief description of relevant subsystem +- Data flow: A → B → C +- Key interfaces/types to be aware of + +## Test Strategy +- Existing test files and what they cover +- Test patterns used (describe/it, vitest, mocking approach) +- Edge cases to cover + +## Gotchas & Constraints +- Don't modify X because Y +- Z is deprecated, use W instead + +## Key Code Excerpts +[2-3 most important code blocks with file path and line numbers] +``` + +## Completion Signals +When you are done: +- If investigation is complete and workpad is posted: output `[STAGE_COMPLETE]` +- If the spec is ambiguous or contradictory: output `[STAGE_FAILED: spec]` with an explanation +- If you hit infrastructure issues (API limits, network errors): output `[STAGE_FAILED: infra]` with details +{% endif %} + +{% if stageName == "implement" %} +## Stage: Implementation +You are in the IMPLEMENT stage. Read INVESTIGATION-BRIEF.md first if it exists in the worktree root. 
It contains targeted findings from the investigation stage including relevant files, code patterns, architecture context, and test strategy. Use it to skip codebase exploration and go straight to implementation. If the file does not exist, fall back to reading issue comments for the investigation plan. + +{% if reworkCount > 0 %} +## REWORK ATTEMPT {{ reworkCount }} + +**First, determine the rework type:** + +### If `REBASE-BRIEF.md` exists in the worktree root — this is a REBASE REWORK: +1. Read `REBASE-BRIEF.md` for context on conflicting files and recent main commits +2. Rebase the current branch onto `origin/main` and resolve all merge conflicts +3. Run all `# Verify:` commands from the spec to ensure the build still passes +4. Delete `REBASE-BRIEF.md` after successful rebase and verification +5. Do NOT modify code beyond what is necessary to resolve conflicts +6. If conflicts cannot be resolved cleanly, output `[STAGE_FAILED: verify]` with details + +### Else if `## Review Findings` comments exist — this is a REVIEW REWORK: +Read ALL comments on this Linear issue starting with `## Review Findings`. These contain the specific findings you must fix. +- Fix ONLY the identified findings +- Do not modify code outside the affected files unless strictly necessary +- Do not reinterpret the spec +- If a finding conflicts with the spec, output `[STAGE_FAILED: spec]` with an explanation +{% endif %} + +## Implementation Steps + +1. Read any investigation notes from previous comments on this issue. +2. Create a feature branch from the issue's suggested branch name{% if issue.branch_name %} (`{{ issue.branch_name }}`){% endif %}, or use `{{ issue.identifier | downcase }}/`. +3. Implement the task per the issue description. +4. Write tests as needed. +5. Run all `# Verify:` commands from the spec. You are not done until every verify command exits 0. +6. 
Before creating the PR, capture structured tool output: + - Run `npx tsc --noEmit 2>&1` and include output in PR body under `## Tool Output > TypeScript` + - Run `npm test 2>&1` and include summary in PR body under `## Tool Output > Tests` + - Run `semgrep scan --config auto --json 2>&1` (if available) and include raw output in PR body under `## SAST Output` + - Do NOT filter or interpret SAST results — include them verbatim. +7. Commit your changes with message format: `feat({{ issue.identifier }}): `. +8. Open a PR targeting this repo (not its upstream fork parent) via `gh pr create --repo $(git remote get-url origin | sed "s|.*github.com/||;s|\.git$||")` with the issue description in the PR body. Include the Tool Output and SAST Output sections. +9. Link the PR to the Linear issue by including `{{ issue.identifier }}` in the PR title or body. + +### Workpad (implement) +Update the workpad comment at these milestones during implementation. +**Preferred**: Edit your local `workpad.md` file and call `sync_workpad` with `issue_id`, `file_path`, and `comment_id` (from the investigate stage). +**Fallback** (if `sync_workpad` is unavailable): +1. Search for the existing workpad comment (body starts with `## Workpad`) using `linear_graphql`: + ```graphql + query { issue(id: "{{ issue.id }}") { comments { nodes { id body } } } } + ``` +2. Update it using `commentUpdate` with the comment's `id`. +3. At each milestone, update the relevant sections: + - **After starting implementation**: Check off Plan items as you complete them. + - **After implementation is done**: Add a Notes entry (e.g., `- Implementation complete. PR # opened.`), update Validation with actual commands run. + - **After all tests pass**: Check off Acceptance Criteria items, add a Notes entry confirming validation. +4. Do NOT update the workpad after every small code change — only at the milestones above. +5. 
If no workpad comment exists (e.g., investigation stage was skipped), create one using the template from the investigate stage instructions. + +10. **If your changes are app-touching** (UI, API responses visible to users, frontend assets), capture a screenshot after validation passes and embed it in the workpad: + - Take a screenshot (e.g., `npx playwright screenshot` or `curl` the endpoint and save the response). + - Upload it using the fileUpload flow described in the **Media in Workpads** section. + - Add the image to the workpad comment under Notes: `![screenshot after validation](assetUrl)`. + - Skip this step for non-visual changes (library code, configs, internal refactors). + +## Completion Signals +When you are done: +- If all verify commands pass and PR is created: output `[STAGE_COMPLETE]` +- If you cannot resolve a verify failure after 3 attempts: output `[STAGE_FAILED: verify]` with the failing command and output +- If the spec is ambiguous or contradictory: output `[STAGE_FAILED: spec]` with an explanation +- If you hit infrastructure issues (API limits, network errors): output `[STAGE_FAILED: infra]` with details +{% endif %} + +{% if stageName == "review" %} +## Stage: Review +You are a review agent. Load and execute the /pipeline-review skill. + +The PR for this issue is on the current branch. The issue description contains the frozen spec. The PR body contains Tool Output and SAST Output sections from the implementation agent. + +If all findings are clean or only P3/theoretical: output `[STAGE_COMPLETE]` +If surviving P1/P2 findings exist: post them as a `## Review Findings` comment on the Linear issue, then output `[STAGE_FAILED: review]` with a one-line summary. +{% endif %} + +{% if stageName == "merge" %} +## Stage: Merge +You are in the MERGE stage. The PR has been reviewed and approved. + +### Merge Queue Context +This repo uses GitHub's merge queue. 
When you run `gh pr merge`, GitHub will: +- **If checks passed**: Add the PR to the merge queue. You'll see: `"✓ Pull request ...#N will be added to the merge queue for main when ready"` +- **If checks pending**: Enable auto-merge. You'll see: `"✓ Pull request ...#N will be automatically merged via squash when all requirements are met"` + +In BOTH cases, the merge is not immediate — GitHub queues it, rebases, runs CI on the rebased version, then merges. This is normal behavior. Do NOT interpret it as a failure. + +### Step 1: Merge the PR +Run `gh pr merge --squash --delete-branch --repo $(git remote get-url origin | sed "s|.*github.com/||;s|\.git$||")`. This single command is sufficient. Do NOT: +- Retry the merge command if you see a "merge queue" or "auto-merge" response — that IS success +- Run `gh pr merge` with `--admin` to bypass the queue +- Modify any code in this stage + +### Step 2: Wait for Merge to Complete +After the merge command succeeds, wait for the merge queue to finish: +``` +gh pr checks --watch --required --fail-fast +``` +This blocks until all checks complete (including merge queue CI). Then confirm the PR merged: +``` +gh pr view --json state --jq '.state' +``` +Expected: `MERGED`. If the state is `MERGED`, proceed to workpad update. + +If the merge queue rejects the PR (check failures on rebased code), run `gh pr view --json state,statusCheckRollup` to understand the failure, then output `[STAGE_FAILED: rebase]` — the queue failure means the code doesn't work after rebase against latest main. + +### Step 2b: If Conflicts — Write Rebase Brief and Signal Failure +If the PR has merge conflicts (mergeable is "CONFLICTING" or mergeStateStatus indicates conflicts): +1. Do NOT attempt to resolve conflicts — detect and signal only +2. 
Write `REBASE-BRIEF.md` to the worktree root with the following structure (keep under ~50 lines): + ```markdown + # Rebase Brief + ## Issue: {{ issue.identifier }} — {{ issue.title }} + + ## Conflicting Files + - `path/to/conflicted-file.ts` — nature of conflict if identifiable + + ## Recent Main Commits + (output of git log origin/main --oneline -10 since branch diverged) + + ## Semantic Context + - Any observations about what the conflicting PRs changed (from PR titles/commits) + ``` +3. To identify conflicting files, run `git fetch origin && git merge-tree $(git merge-base HEAD origin/main) HEAD origin/main` or attempt a dry-run merge +4. To get recent main commits, run `git log origin/main --oneline -10` +5. Output `[STAGE_FAILED: rebase]` as the very last line of your final message + +### Workpad (merge) +After merging the PR, update the workpad comment one final time. +**Preferred**: Edit your local `workpad.md` file and call `sync_workpad` with `issue_id`, `file_path`, and `comment_id`. +**Fallback** (if `sync_workpad` is unavailable): +1. Search for the existing workpad comment (body starts with `## Workpad`) using `linear_graphql`: + ```graphql + query { issue(id: "{{ issue.id }}") { comments { nodes { id body } } } } + ``` +2. Update it using `commentUpdate`: + - Check off all remaining Plan and Acceptance Criteria items. + - Add a final Notes entry: `- PR merged. Issue complete.` + +- When you have successfully merged the PR, output the exact text `[STAGE_COMPLETE]` as the very last line of your final message. +{% endif %} +## Scope Discipline + +- If your task requires a capability that doesn't exist in the codebase and isn't specified in the spec, stop and comment what's missing on the issue. Don't scaffold unspecced infrastructure. +- Tests must be runnable against $BASE_URL (no localhost assumptions in committed tests). + +## Workpad Rules + +You maintain a single persistent `## Workpad` comment on the Linear issue. 
This is your structured progress document. + +**Critical rules:** +- **Never create multiple workpad comments.** Always search for an existing comment with `## Workpad` in its body before creating a new one. +- **Update at milestones only** — plan finalized, implementation done, validation complete. Do NOT sync after every minor change. +- **Prefer `sync_workpad` over raw GraphQL.** Write your workpad content to a local `workpad.md` file, then call `sync_workpad` with `issue_id`, `file_path`, and optionally `comment_id` (returned from the first sync). This keeps the workpad body out of your conversation context and saves tokens. Fall back to `linear_graphql` only if `sync_workpad` is unavailable. +- **`linear_graphql` fallback patterns** (use only if `sync_workpad` is unavailable): + - Search comments: `query { issue(id: "") { comments { nodes { id body } } } }` + - Create comment: `mutation { commentCreate(input: { issueId: "", body: "" }) { comment { id } } }` + - Update comment: `mutation { commentUpdate(id: "", input: { body: "" }) { comment { id } } }` +- **Never use `__type` or `__schema` introspection queries** against the Linear API. Use the exact patterns above. + +## Media in Workpads (fileUpload) + +When you capture evidence (screenshots, recordings, logs) during implementation, embed them in the workpad using Linear's `fileUpload` API. This is a 3-step flow: + +**Step 1: Get upload URL** via `linear_graphql`: +```graphql +mutation($filename: String!, $contentType: String!, $size: Int!) 
{ + fileUpload(filename: $filename, contentType: $contentType, size: $size, makePublic: true) { + success + uploadFile { uploadUrl assetUrl headers { key value } } + } +} +``` + +**Step 2: Upload file bytes** using `curl`: +```bash +# Build header flags from the returned headers array +curl -X PUT -H "Content-Type: " \ + -H ": " -H ": " \ + --data-binary @ "" +``` + +**Step 3: Embed in workpad** — add `![description](assetUrl)` to the workpad comment body (either via `sync_workpad` or `commentUpdate`). + +**Supported content types**: `image/png`, `image/jpeg`, `image/gif`, `video/mp4`, `application/pdf`. + +**When to capture media**: Only when evidence adds value — screenshots of UI changes, recordings of interaction flows, or error screenshots for debugging. Do not upload media for non-visual tasks (e.g., pure API or library changes). + +## Documentation Maintenance + +- If you add a new module, API endpoint, or significant abstraction, update the relevant docs/ file and the AGENTS.md Documentation Map entry. If no relevant doc exists, create one following the docs/ conventions (# Title, > Last updated header). +- If a docs/ file you reference during implementation is stale or missing, update/create it as part of your implementation. Include the update in the same PR as your code changes — never in a separate PR. +- If you make a non-obvious architectural decision during implementation, create a design doc in docs/design-docs/ following the ADR format (numbered, with Status line). Add it to the AGENTS.md design docs table. +- When you complete your implementation, update the > Last updated date on any docs/ file you modified. +- Do not update docs/generated/ files — those are auto-generated and will be overwritten. +- Commit doc updates in the same PR as code changes, not separately. 
diff --git a/pipeline-config/templates/ci-minimal.yml b/pipeline-config/templates/ci-minimal.yml new file mode 100644 index 00000000..99bbd641 --- /dev/null +++ b/pipeline-config/templates/ci-minimal.yml @@ -0,0 +1,52 @@ +name: CI + +on: + push: + branches: + - main + pull_request: + merge_group: + +jobs: + test: + runs-on: ubuntu-latest + + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Detect package manager + id: detect-pm + run: | + if [ -f bun.lock ]; then + echo "manager=bun" >> "$GITHUB_OUTPUT" + echo "install=bun install --frozen-lockfile" >> "$GITHUB_OUTPUT" + elif [ -f pnpm-lock.yaml ]; then + echo "manager=pnpm" >> "$GITHUB_OUTPUT" + echo "install=pnpm install --frozen-lockfile" >> "$GITHUB_OUTPUT" + elif [ -f yarn.lock ]; then + echo "manager=yarn" >> "$GITHUB_OUTPUT" + echo "install=yarn install --frozen-lockfile" >> "$GITHUB_OUTPUT" + else + echo "manager=npm" >> "$GITHUB_OUTPUT" + echo "install=npm ci" >> "$GITHUB_OUTPUT" + fi + + - name: Setup pnpm + if: steps.detect-pm.outputs.manager == 'pnpm' + uses: pnpm/action-setup@v4 + + - name: Setup Node.js + uses: actions/setup-node@v4 + with: + node-version: 22 + cache: ${{ steps.detect-pm.outputs.manager }} + + - name: Install dependencies + run: ${{ steps.detect-pm.outputs.install }} + + - name: Build + run: npm run build --if-present + + - name: Test + run: npm test --if-present diff --git a/pipeline-config/validate.sh b/pipeline-config/validate.sh new file mode 100755 index 00000000..4e3f2054 --- /dev/null +++ b/pipeline-config/validate.sh @@ -0,0 +1,187 @@ +#!/usr/bin/env bash +set -euo pipefail + +# Validation script for pipeline-config/WORKFLOW.md +# Checks that YAML frontmatter parses, referenced files exist, and scripts are executable. + +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" +WORKFLOW_FILE="$SCRIPT_DIR/WORKFLOW.md" +ERRORS=0 + +echo "=== Pipeline Config Validation ===" +echo "" + +# --- 1. Check WORKFLOW.md exists --- +if [ ! 
-f "$WORKFLOW_FILE" ]; then + echo "FAIL: WORKFLOW.md not found at $WORKFLOW_FILE" + exit 1 +fi +echo "OK: WORKFLOW.md found" + +# --- 2. Extract and validate YAML frontmatter --- +# Extract content between first and second --- +YAML_CONTENT=$(awk '/^---$/{n++;next} n==1{print} n==2{exit}' "$WORKFLOW_FILE") + +if [ -z "$YAML_CONTENT" ]; then + echo "FAIL: No YAML frontmatter found (expected content between --- delimiters)" + exit 1 +fi + +# Try parsing YAML — prefer node (yaml package available in symphony-ts) +SYMPHONY_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)" +NODE_PATH_PREFIX="" +if [ -d "$SYMPHONY_ROOT/node_modules" ]; then + NODE_PATH_PREFIX="NODE_PATH=$SYMPHONY_ROOT/node_modules" +fi + +if command -v node &>/dev/null && [ -n "$NODE_PATH_PREFIX" ]; then + if ! echo "$YAML_CONTENT" | env $NODE_PATH_PREFIX node -e " + const yaml = require('yaml'); + let data = ''; + process.stdin.on('data', c => data += c); + process.stdin.on('end', () => { yaml.parse(data); }); + " 2>/dev/null; then + echo "FAIL: YAML frontmatter failed to parse" + ERRORS=$((ERRORS + 1)) + else + echo "OK: YAML frontmatter parses successfully" + fi +elif command -v python3 &>/dev/null; then + if echo "$YAML_CONTENT" | python3 -c "import sys, yaml; yaml.safe_load(sys.stdin)" 2>/dev/null; then + echo "OK: YAML frontmatter parses successfully" + else + echo "FAIL: YAML frontmatter failed to parse" + ERRORS=$((ERRORS + 1)) + fi +else + echo "WARN: Neither node nor python3 (with PyYAML) available — skipping YAML parse check" +fi + +# --- 3. 
Check referenced prompt template files --- +echo "" +echo "--- Prompt Templates ---" + +PROMPTS_DIR="$SCRIPT_DIR/prompts" +PROMPT_FILES=( + "global.liquid" + "investigate.liquid" + "implement.liquid" + "review-adversarial.liquid" + "review-security.liquid" + "merge.liquid" +) + +# Also extract prompt file references from YAML +YAML_PROMPTS=$(echo "$YAML_CONTENT" | grep -oE 'prompts/[a-z-]+\.liquid' | sort -u) + +for prompt in "${PROMPT_FILES[@]}"; do + if [ -f "$PROMPTS_DIR/$prompt" ]; then + echo " OK: prompts/$prompt" + else + echo " FAIL: prompts/$prompt not found" + ERRORS=$((ERRORS + 1)) + fi +done + +# Check any YAML-referenced prompts that aren't in our expected list +for yaml_prompt in $YAML_PROMPTS; do + if [ -f "$SCRIPT_DIR/$yaml_prompt" ]; then + echo " OK: $yaml_prompt (referenced in YAML)" + else + echo " FAIL: $yaml_prompt (referenced in YAML) not found" + ERRORS=$((ERRORS + 1)) + fi +done + +# --- 4. Check hook scripts exist and are executable --- +echo "" +echo "--- Hook Scripts ---" + +HOOKS_DIR="$SCRIPT_DIR/hooks" +HOOK_FILES=( + "after-create.sh" + "before-run.sh" +) + +for hook in "${HOOK_FILES[@]}"; do + if [ ! -f "$HOOKS_DIR/$hook" ]; then + echo " FAIL: hooks/$hook not found" + ERRORS=$((ERRORS + 1)) + elif [ ! -x "$HOOKS_DIR/$hook" ]; then + echo " FAIL: hooks/$hook exists but is not executable" + ERRORS=$((ERRORS + 1)) + else + echo " OK: hooks/$hook (executable)" + fi +done + +# --- 5. 
Summarize stages and transitions --- +echo "" +echo "--- Stages & Transitions ---" + +if command -v node &>/dev/null && [ -n "$NODE_PATH_PREFIX" ]; then + echo "$YAML_CONTENT" | env $NODE_PATH_PREFIX node -e " + const yaml = require('yaml'); + let data = ''; + process.stdin.on('data', c => data += c); + process.stdin.on('end', () => { + const config = yaml.parse(data); + const stages = config.stages || {}; + const initial = stages.initial_stage; + delete stages.initial_stage; + + if (Object.keys(stages).length === 0) { + console.log(' WARN: No stages defined'); + return; + } + + if (initial) console.log(' Initial stage:', initial); + console.log(); + + for (const [name, s] of Object.entries(stages)) { + const parts = [' ' + name + ': type=' + (s.type || '?')]; + if (s.runner) parts.push('runner=' + s.runner); + if (s.model) parts.push('model=' + s.model); + if (s.max_turns) parts.push('max_turns=' + s.max_turns); + if (s.gate_type) parts.push('gate_type=' + s.gate_type); + if (s.reviewers && s.reviewers.length > 0) { + const roles = s.reviewers.map(r => r.role || '?'); + parts.push('reviewers=[' + roles.join(', ') + ']'); + } + const transitions = []; + for (const key of ['on_complete', 'on_approve', 'on_rework']) { + if (s[key]) transitions.push(key + '=' + s[key]); + } + if (transitions.length > 0) parts.push(transitions.join(' ')); + console.log(parts.join(' ')); + } + + console.log(); + console.log(' Flow:'); + if (initial && stages[initial]) { + const visited = new Set(); + let current = initial; + const flow = []; + while (current && !visited.has(current)) { + visited.add(current); + flow.push(current); + const stage = stages[current] || {}; + current = stage.on_complete || stage.on_approve; + } + console.log(' ' + flow.join(' → ')); + } + }); + " +else + echo " WARN: node with yaml package not available — skipping stage summary" +fi + +# --- Final result --- +echo "" +if [ "$ERRORS" -gt 0 ]; then + echo "RESULT: $ERRORS error(s) found" + exit 1 +else + echo 
"RESULT: All checks passed" + exit 0 +fi diff --git a/pipeline-config/workflows/WORKFLOW-household.md b/pipeline-config/workflows/WORKFLOW-household.md new file mode 100644 index 00000000..91d26599 --- /dev/null +++ b/pipeline-config/workflows/WORKFLOW-household.md @@ -0,0 +1,458 @@ +--- +tracker: + kind: linear + api_key: $LINEAR_API_KEY + project_slug: 162c75be4fa7 + active_states: + - Todo + - In Progress + - In Review + - Blocked + - Resume + terminal_states: + - Done + - Cancelled + +escalation_state: Blocked + +polling: + interval_ms: 30000 + +workspace: + root: ./workspaces + +agent: + max_concurrent_agents: 1 + max_turns: 30 + max_retry_backoff_ms: 300000 + +codex: + stall_timeout_ms: 1800000 + +runner: + kind: claude-code + model: claude-sonnet-4-5 + +hooks: + after_create: | + set -euo pipefail + if [ -z "${REPO_URL:-}" ]; then + echo "ERROR: REPO_URL environment variable is not set" >&2 + exit 1 + fi + echo "Cloning $REPO_URL into workspace..." + git clone --depth 1 "$REPO_URL" . + if [ -f package.json ]; then + if [ -f bun.lock ]; then + bun install --frozen-lockfile + elif [ -f pnpm-lock.yaml ]; then + pnpm install --frozen-lockfile + elif [ -f yarn.lock ]; then + yarn install --frozen-lockfile + else + npm install + fi + fi + # --- Build code graph (best-effort) --- + if command -v code-review-graph >/dev/null 2>&1; then + echo "Building code review graph..." + code-review-graph build --repo . || echo "WARNING: code-review-graph build failed, continuing without graph" >&2 + else + echo "WARNING: code-review-graph not installed, skipping graph build" >&2 + fi + echo "Workspace setup complete." + before_run: | + set -euo pipefail + echo "Syncing workspace with upstream..." + + # --- Git lock handling --- + wait_for_git_lock() { + local attempt=0 + while [ -f .git/index.lock ] && [ $attempt -lt 6 ]; do + echo "WARNING: .git/index.lock exists, waiting 5s (attempt $((attempt+1))/6)..." 
>&2 + sleep 5 + attempt=$((attempt+1)) + done + if [ -f .git/index.lock ]; then + echo "WARNING: .git/index.lock still exists after 30s, removing stale lock" >&2 + rm -f .git/index.lock + fi + } + + # --- Git fetch with retry --- + fetch_ok=false + for attempt in 1 2 3; do + wait_for_git_lock + if git fetch origin 2>/dev/null; then + fetch_ok=true + break + fi + echo "WARNING: git fetch failed (attempt $attempt/3), retrying in 2s..." >&2 + sleep 2 + done + if [ "$fetch_ok" = false ]; then + echo "WARNING: git fetch failed after 3 attempts, continuing with stale refs" >&2 + fi + + # --- Rebase (best-effort) --- + CURRENT_BRANCH=$(git rev-parse --abbrev-ref HEAD 2>/dev/null || echo "unknown") + if [ "$CURRENT_BRANCH" = "main" ] || [ "$CURRENT_BRANCH" = "master" ]; then + echo "On $CURRENT_BRANCH — rebasing onto latest..." + wait_for_git_lock + if ! git rebase "origin/$CURRENT_BRANCH" 2>/dev/null; then + echo "WARNING: Rebase failed, aborting rebase" >&2 + git rebase --abort 2>/dev/null || true + fi + else + echo "On feature branch $CURRENT_BRANCH — skipping rebase, fetch only." + fi + # Import investigation brief into CLAUDE.md if it exists + if [ -f "INVESTIGATION-BRIEF.md" ]; then + if ! grep -q "@INVESTIGATION-BRIEF.md" CLAUDE.md 2>/dev/null; then + echo '' >> CLAUDE.md + echo '@INVESTIGATION-BRIEF.md' >> CLAUDE.md + fi + fi + echo "Workspace synced." + before_remove: | + set -uo pipefail + BRANCH=$(git rev-parse --abbrev-ref HEAD 2>/dev/null || echo "") + if [ -z "$BRANCH" ] || [ "$BRANCH" = "main" ] || [ "$BRANCH" = "master" ] || [ "$BRANCH" = "HEAD" ]; then + exit 0 + fi + echo "Cleaning up branch $BRANCH..." + # Close any open PR for this branch (also deletes the remote branch via --delete-branch) + PR_NUM=$(gh pr list --head "$BRANCH" --state open --json number --jq '.[0].number' 2>/dev/null || echo "") + if [ -n "$PR_NUM" ]; then + echo "Closing PR #$PR_NUM and deleting remote branch..." 
+ gh pr close "$PR_NUM" --delete-branch 2>/dev/null || true + else + # No open PR — just delete the remote branch if it exists + echo "No open PR found, deleting remote branch..." + git push origin --delete "$BRANCH" 2>/dev/null || true + fi + echo "Cleanup complete." + timeout_ms: 120000 + +server: + port: 4327 + +observability: + dashboard_enabled: true + refresh_ms: 5000 + +stages: + initial_stage: investigate + + investigate: + type: agent + runner: claude-code + model: claude-sonnet-4-5 + max_turns: 8 + linear_state: In Progress + mcp_servers: + code-review-graph: + command: uvx + args: + - code-review-graph + - serve + on_complete: implement + + implement: + type: agent + runner: claude-code + model: claude-sonnet-4-5 + max_turns: 30 + mcp_servers: + code-review-graph: + command: uvx + args: + - code-review-graph + - serve + on_complete: review + + review: + type: agent + runner: claude-code + model: claude-opus-4-6 + max_turns: 15 + max_rework: 3 + linear_state: In Review + on_complete: merge + on_rework: implement + + merge: + type: agent + runner: claude-code + model: claude-sonnet-4-5 + max_turns: 5 + on_complete: done + + done: + type: terminal + linear_state: Done +--- + +You are running in headless/unattended mode. Do NOT use interactive skills, slash commands, or plan mode. Do not prompt for user input. Complete your work autonomously. + +You are working on the Household product. + +Implement only what your task specifies. If you encounter missing functionality that another task covers, add a TODO comment rather than implementing it. Do not refactor surrounding code or add unsolicited improvements. + +Never hardcode localhost or 127.0.0.1. Use the $BASE_URL environment variable for all URL references. Set BASE_URL=localhost: during local development. + +# {{ issue.identifier }} — {{ issue.title }} + +You are working on Linear issue {{ issue.identifier }}. 
+
+## Issue Description
+
+{{ issue.description }}
+
+{% if issue.labels.size > 0 %}
+Labels: {{ issue.labels | join: ", " }}
+{% endif %}
+
+{% if stageName == "investigate" %}
+## Stage: Investigation
+You are in the INVESTIGATE stage. Your job is to analyze the issue and create an implementation plan.
+
+{% if issue.state == "Resume" %}
+## RESUME CONTEXT
+This issue was previously blocked. Check the issue comments for a `## Resume Context` comment explaining what changed. Focus your investigation on the blocking reasons and what has been updated.
+{% endif %}
+
+- Read the codebase to understand existing patterns and architecture
+- Identify which files need to change and what the approach should be
+- Post a comment on the Linear issue (via `linear_graphql` `commentCreate` — `gh` targets GitHub, not Linear) with your investigation findings and proposed implementation plan
+- Do NOT implement code, create branches, or open PRs in this stage — investigation only
+
+### Workpad (investigate)
+After completing your investigation, create the workpad comment on this Linear issue.
+**Preferred**: Write the workpad content to a local `workpad.md` file and call `sync_workpad` with `issue_id` and `file_path`. Save the returned `comment_id` for future updates.
+**Fallback** (if `sync_workpad` is unavailable):
+1. First, search for an existing workpad comment using `linear_graphql`:
+   ```graphql
+   query { issue(id: "{{ issue.id }}") { comments { nodes { id body } } } }
+   ```
+   Look for a comment whose body starts with `## Workpad`.
+2. If no workpad comment exists, create one using `commentCreate`. If one exists, update it using `commentUpdate`.
+3. Use this template for the workpad body:
+   ```
+   ## Workpad
+   **Environment**: :@
+
+   ### Plan
+   - [ ] Step 1 derived from issue description
+   - [ ] Step 2 ...
+   - [ ] Substep if needed
+
+   ### Acceptance Criteria
+   - [ ] Criterion from issue requirements
+   - [ ] ...
+
+   ### Validation
+   - `<verify command 1>`
+   - `<verify command 2>`
+
+   ### Notes
+   - Investigation complete. Plan posted.
+ + ### Confusions + (Only add this section if something in the issue was genuinely unclear.) + ``` +4. Fill the Plan and Acceptance Criteria sections from your investigation findings. + +## Investigation Brief + +After posting the workpad, write `INVESTIGATION-BRIEF.md` to the worktree root. This file gives the implement-stage agent a concise orientation without re-reading the codebase. + +Keep the brief under ~200 lines (~4K tokens). Use exactly this structure: + +```markdown +# Investigation Brief +## Issue: [ISSUE-KEY] — [Title] + +## Objective +One-paragraph summary of what needs to be done and why. + +## Relevant Files (ranked by importance) +1. `src/path/to/primary-file.ts` — Main file to modify. [What it does, why it matters] +2. `src/path/to/secondary-file.ts` — Related dependency. [What to know] +3. `tests/path/to/test-file.test.ts` — Existing tests. [Coverage notes] + +## Key Code Patterns +- Pattern X is used for Y (see `file.ts:42-67`) +- The codebase uses Z convention for this type of change + +## Architecture Context +- Brief description of relevant subsystem +- Data flow: A → B → C +- Key interfaces/types to be aware of + +## Test Strategy +- Existing test files and what they cover +- Test patterns used (describe/it, vitest, mocking approach) +- Edge cases to cover + +## Gotchas & Constraints +- Don't modify X because Y +- Z is deprecated, use W instead + +## Key Code Excerpts +[2-3 most important code blocks with file path and line numbers] +``` + +## Completion Signals +When you are done: +- If investigation is complete and workpad is posted: output `[STAGE_COMPLETE]` +- If the spec is ambiguous or contradictory: output `[STAGE_FAILED: spec]` with an explanation +- If you hit infrastructure issues (API limits, network errors): output `[STAGE_FAILED: infra]` with details +{% endif %} + +{% if stageName == "implement" %} +## Stage: Implementation +You are in the IMPLEMENT stage. Read INVESTIGATION-BRIEF.md first if it exists in the worktree root. 
It contains targeted findings from the investigation stage including relevant files, code patterns, architecture context, and test strategy. Use it to skip codebase exploration and go straight to implementation. If the file does not exist, fall back to reading issue comments for the investigation plan. + +{% if reworkCount > 0 %} +## REWORK ATTEMPT {{ reworkCount }} +This is a rework attempt. Read ALL comments on this Linear issue starting with `## Review Findings`. These contain the specific findings you must fix. +- Fix ONLY the identified findings +- Do not modify code outside the affected files unless strictly necessary +- Do not reinterpret the spec +- If a finding conflicts with the spec, output `[STAGE_FAILED: spec]` with an explanation +{% endif %} + +## Implementation Steps + +1. Read any investigation notes from previous comments on this issue. +2. Create a feature branch from the issue's suggested branch name{% if issue.branch_name %} (`{{ issue.branch_name }}`){% endif %}, or use `{{ issue.identifier | downcase }}/`. +3. Implement the task per the issue description. +4. Write tests as needed. +5. Run all `# Verify:` commands from the spec. You are not done until every verify command exits 0. +6. Before creating the PR, capture structured tool output: + - Run `npx tsc --noEmit 2>&1` and include output in PR body under `## Tool Output > TypeScript` + - Run `npm test 2>&1` and include summary in PR body under `## Tool Output > Tests` + - Run `semgrep scan --config auto --json 2>&1` (if available) and include raw output in PR body under `## SAST Output` + - Do NOT filter or interpret SAST results — include them verbatim. +7. Commit your changes with message format: `feat({{ issue.identifier }}): `. +8. Open a PR targeting this repo (not its upstream fork parent) via `gh pr create --repo $(git remote get-url origin | sed "s|.*github.com/||;s|\.git$||")` with the issue description in the PR body. Include the Tool Output and SAST Output sections. +9. 
Link the PR to the Linear issue by including `{{ issue.identifier }}` in the PR title or body. + +### Workpad (implement) +Update the workpad comment at these milestones during implementation. +**Preferred**: Edit your local `workpad.md` file and call `sync_workpad` with `issue_id`, `file_path`, and `comment_id` (from the investigate stage). +**Fallback** (if `sync_workpad` is unavailable): +1. Search for the existing workpad comment (body starts with `## Workpad`) using `linear_graphql`: + ```graphql + query { issue(id: "{{ issue.id }}") { comments { nodes { id body } } } } + ``` +2. Update it using `commentUpdate` with the comment's `id`. +3. At each milestone, update the relevant sections: + - **After starting implementation**: Check off Plan items as you complete them. + - **After implementation is done**: Add a Notes entry (e.g., `- Implementation complete. PR # opened.`), update Validation with actual commands run. + - **After all tests pass**: Check off Acceptance Criteria items, add a Notes entry confirming validation. +4. Do NOT update the workpad after every small code change — only at the milestones above. +5. If no workpad comment exists (e.g., investigation stage was skipped), create one using the template from the investigate stage instructions. + +10. **If your changes are app-touching** (UI, API responses visible to users, frontend assets), capture a screenshot after validation passes and embed it in the workpad: + - Take a screenshot (e.g., `npx playwright screenshot` or `curl` the endpoint and save the response). + - Upload it using the fileUpload flow described in the **Media in Workpads** section. + - Add the image to the workpad comment under Notes: `![screenshot after validation](assetUrl)`. + - Skip this step for non-visual changes (library code, configs, internal refactors). 
+ +## Completion Signals +When you are done: +- If all verify commands pass and PR is created: output `[STAGE_COMPLETE]` +- If you cannot resolve a verify failure after 3 attempts: output `[STAGE_FAILED: verify]` with the failing command and output +- If the spec is ambiguous or contradictory: output `[STAGE_FAILED: spec]` with an explanation +- If you hit infrastructure issues (API limits, network errors): output `[STAGE_FAILED: infra]` with details +{% endif %} + +{% if stageName == "review" %} +## Stage: Review +You are a review agent. Load and execute the /pipeline-review skill. + +The PR for this issue is on the current branch. The issue description contains the frozen spec. The PR body contains Tool Output and SAST Output sections from the implementation agent. + +If all findings are clean or only P3/theoretical: output `[STAGE_COMPLETE]` +If surviving P1/P2 findings exist: post them as a `## Review Findings` comment on the Linear issue, then output `[STAGE_FAILED: review]` with a one-line summary. +{% endif %} + +{% if stageName == "merge" %} +## Stage: Merge +You are in the MERGE stage. The PR has been reviewed and approved. +- Merge the PR via `gh pr merge --squash --delete-branch --repo $(git remote get-url origin | sed "s|.*github.com/||;s|\.git$||")` +- Verify the merge succeeded on the main branch +- Do NOT modify code in this stage + +### Workpad (merge) +After merging the PR, update the workpad comment one final time. +**Preferred**: Edit your local `workpad.md` file and call `sync_workpad` with `issue_id`, `file_path`, and `comment_id`. +**Fallback** (if `sync_workpad` is unavailable): +1. Search for the existing workpad comment (body starts with `## Workpad`) using `linear_graphql`: + ```graphql + query { issue(id: "{{ issue.id }}") { comments { nodes { id body } } } } + ``` +2. Update it using `commentUpdate`: + - Check off all remaining Plan and Acceptance Criteria items. + - Add a final Notes entry: `- PR merged. 
Issue complete.` + +- When you have successfully merged the PR, output the exact text `[STAGE_COMPLETE]` as the very last line of your final message. +{% endif %} + +## Scope Discipline + +- If your task requires a capability that doesn't exist in the codebase and isn't specified in the spec, stop and comment what's missing on the issue. Don't scaffold unspecced infrastructure. +- Tests must be runnable against $BASE_URL (no localhost assumptions in committed tests). + +## Workpad Rules + +You maintain a single persistent `## Workpad` comment on the Linear issue. This is your structured progress document. + +**Critical rules:** +- **Never create multiple workpad comments.** Always search for an existing comment with `## Workpad` in its body before creating a new one. +- **Update at milestones only** — plan finalized, implementation done, validation complete. Do NOT sync after every minor change. +- **Prefer `sync_workpad` over raw GraphQL.** Write your workpad content to a local `workpad.md` file, then call `sync_workpad` with `issue_id`, `file_path`, and optionally `comment_id` (returned from the first sync). This keeps the workpad body out of your conversation context and saves tokens. Fall back to `linear_graphql` only if `sync_workpad` is unavailable. +- **`linear_graphql` fallback patterns** (use only if `sync_workpad` is unavailable): + - Search comments: `query { issue(id: "") { comments { nodes { id body } } } }` + - Create comment: `mutation { commentCreate(input: { issueId: "", body: "" }) { comment { id } } }` + - Update comment: `mutation { commentUpdate(id: "", input: { body: "" }) { comment { id } } }` +- **Never use `__type` or `__schema` introspection queries** against the Linear API. Use the exact patterns above. + +## Media in Workpads (fileUpload) + +When you capture evidence (screenshots, recordings, logs) during implementation, embed them in the workpad using Linear's `fileUpload` API. 
This is a 3-step flow:
+
+**Step 1: Get upload URL** via `linear_graphql`:
+```graphql
+mutation($filename: String!, $contentType: String!, $size: Int!) {
+  fileUpload(filename: $filename, contentType: $contentType, size: $size, makePublic: true) {
+    success
+    uploadFile { uploadUrl assetUrl headers { key value } }
+  }
+}
+```
+
+**Step 2: Upload file bytes** using `curl`:
+```bash
+# Build header flags from the returned headers array
+curl -X PUT -H "Content-Type: <contentType>" \
+  -H "<header.key>: <header.value>" -H "<header.key>: <header.value>" \
+  --data-binary @<local-file> "<uploadUrl>"
+```
+
+**Step 3: Embed in workpad** — add `![description](assetUrl)` to the workpad comment body (either via `sync_workpad` or `commentUpdate`).
+
+**Supported content types**: `image/png`, `image/jpeg`, `image/gif`, `video/mp4`, `application/pdf`.
+
+**When to capture media**: Only when evidence adds value — screenshots of UI changes, recordings of interaction flows, or error screenshots for debugging. Do not upload media for non-visual tasks (e.g., pure API or library changes).
+
+## Documentation Maintenance
+
+- If you add a new module, API endpoint, or significant abstraction, update the relevant docs/ file and the AGENTS.md Documentation Map entry. If no relevant doc exists, create one following the docs/ conventions (# Title, > Last updated header).
+- If a docs/ file you reference during implementation is stale or missing, update/create it as part of your implementation. Include the update in the same PR as your code changes — never in a separate PR.
+- If you make a non-obvious architectural decision during implementation, create a design doc in docs/design-docs/ following the ADR format (numbered, with Status line). Add it to the AGENTS.md design docs table.
+- When you complete your implementation, update the > Last updated date on any docs/ file you modified.
+- Do not update docs/generated/ files — those are auto-generated and will be overwritten.
+- Commit doc updates in the same PR as code changes, not separately.
diff --git a/pipeline-config/workflows/WORKFLOW-hs-data.md b/pipeline-config/workflows/WORKFLOW-hs-data.md new file mode 100644 index 00000000..1da967bc --- /dev/null +++ b/pipeline-config/workflows/WORKFLOW-hs-data.md @@ -0,0 +1,458 @@ +--- +tracker: + kind: linear + api_key: $LINEAR_API_KEY + project_slug: 174b19c8c7db + active_states: + - Todo + - In Progress + - In Review + - Blocked + - Resume + terminal_states: + - Done + - Cancelled + +escalation_state: Blocked + +polling: + interval_ms: 30000 + +workspace: + root: ./workspaces + +agent: + max_concurrent_agents: 1 + max_turns: 30 + max_retry_backoff_ms: 300000 + +codex: + stall_timeout_ms: 1800000 + +runner: + kind: claude-code + model: claude-sonnet-4-5 + +hooks: + after_create: | + set -euo pipefail + if [ -z "${REPO_URL:-}" ]; then + echo "ERROR: REPO_URL environment variable is not set" >&2 + exit 1 + fi + echo "Cloning $REPO_URL into workspace..." + git clone --depth 1 "$REPO_URL" . + if [ -f package.json ]; then + if [ -f bun.lock ]; then + bun install --frozen-lockfile + elif [ -f pnpm-lock.yaml ]; then + pnpm install --frozen-lockfile + elif [ -f yarn.lock ]; then + yarn install --frozen-lockfile + else + npm install + fi + fi + # --- Build code graph (best-effort) --- + if command -v code-review-graph >/dev/null 2>&1; then + echo "Building code review graph..." + code-review-graph build --repo . || echo "WARNING: code-review-graph build failed, continuing without graph" >&2 + else + echo "WARNING: code-review-graph not installed, skipping graph build" >&2 + fi + echo "Workspace setup complete." + before_run: | + set -euo pipefail + echo "Syncing workspace with upstream..." + + # --- Git lock handling --- + wait_for_git_lock() { + local attempt=0 + while [ -f .git/index.lock ] && [ $attempt -lt 6 ]; do + echo "WARNING: .git/index.lock exists, waiting 5s (attempt $((attempt+1))/6)..." 
>&2 + sleep 5 + attempt=$((attempt+1)) + done + if [ -f .git/index.lock ]; then + echo "WARNING: .git/index.lock still exists after 30s, removing stale lock" >&2 + rm -f .git/index.lock + fi + } + + # --- Git fetch with retry --- + fetch_ok=false + for attempt in 1 2 3; do + wait_for_git_lock + if git fetch origin 2>/dev/null; then + fetch_ok=true + break + fi + echo "WARNING: git fetch failed (attempt $attempt/3), retrying in 2s..." >&2 + sleep 2 + done + if [ "$fetch_ok" = false ]; then + echo "WARNING: git fetch failed after 3 attempts, continuing with stale refs" >&2 + fi + + # --- Rebase (best-effort) --- + CURRENT_BRANCH=$(git rev-parse --abbrev-ref HEAD 2>/dev/null || echo "unknown") + if [ "$CURRENT_BRANCH" = "main" ] || [ "$CURRENT_BRANCH" = "master" ]; then + echo "On $CURRENT_BRANCH — rebasing onto latest..." + wait_for_git_lock + if ! git rebase "origin/$CURRENT_BRANCH" 2>/dev/null; then + echo "WARNING: Rebase failed, aborting rebase" >&2 + git rebase --abort 2>/dev/null || true + fi + else + echo "On feature branch $CURRENT_BRANCH — skipping rebase, fetch only." + fi + # Import investigation brief into CLAUDE.md if it exists + if [ -f "INVESTIGATION-BRIEF.md" ]; then + if ! grep -q "@INVESTIGATION-BRIEF.md" CLAUDE.md 2>/dev/null; then + echo '' >> CLAUDE.md + echo '@INVESTIGATION-BRIEF.md' >> CLAUDE.md + fi + fi + echo "Workspace synced." + before_remove: | + set -uo pipefail + BRANCH=$(git rev-parse --abbrev-ref HEAD 2>/dev/null || echo "") + if [ -z "$BRANCH" ] || [ "$BRANCH" = "main" ] || [ "$BRANCH" = "master" ] || [ "$BRANCH" = "HEAD" ]; then + exit 0 + fi + echo "Cleaning up branch $BRANCH..." + # Close any open PR for this branch (also deletes the remote branch via --delete-branch) + PR_NUM=$(gh pr list --head "$BRANCH" --state open --json number --jq '.[0].number' 2>/dev/null || echo "") + if [ -n "$PR_NUM" ]; then + echo "Closing PR #$PR_NUM and deleting remote branch..." 
+ gh pr close "$PR_NUM" --delete-branch 2>/dev/null || true + else + # No open PR — just delete the remote branch if it exists + echo "No open PR found, deleting remote branch..." + git push origin --delete "$BRANCH" 2>/dev/null || true + fi + echo "Cleanup complete." + timeout_ms: 120000 + +server: + port: 4323 + +observability: + dashboard_enabled: true + refresh_ms: 5000 + +stages: + initial_stage: investigate + + investigate: + type: agent + runner: claude-code + model: claude-sonnet-4-5 + max_turns: 8 + linear_state: In Progress + mcp_servers: + code-review-graph: + command: uvx + args: + - code-review-graph + - serve + on_complete: implement + + implement: + type: agent + runner: claude-code + model: claude-sonnet-4-5 + max_turns: 30 + mcp_servers: + code-review-graph: + command: uvx + args: + - code-review-graph + - serve + on_complete: review + + review: + type: agent + runner: claude-code + model: claude-opus-4-6 + max_turns: 15 + max_rework: 3 + linear_state: In Review + on_complete: merge + on_rework: implement + + merge: + type: agent + runner: claude-code + model: claude-sonnet-4-5 + max_turns: 5 + on_complete: done + + done: + type: terminal + linear_state: Done +--- + +You are running in headless/unattended mode. Do NOT use interactive skills, slash commands, or plan mode. Do not prompt for user input. Complete your work autonomously. + +You are working on the HS Data product. + +Implement only what your task specifies. If you encounter missing functionality that another task covers, add a TODO comment rather than implementing it. Do not refactor surrounding code or add unsolicited improvements. + +Never hardcode localhost or 127.0.0.1. Use the $BASE_URL environment variable for all URL references. Set BASE_URL=localhost: during local development. + +# {{ issue.identifier }} — {{ issue.title }} + +You are working on Linear issue {{ issue.identifier }}. 
+ +## Issue Description + +{{ issue.description }} + +{% if issue.labels.size > 0 %} +Labels: {{ issue.labels | join: ", " }} +{% endif %} + +{% if stageName == "investigate" %} +## Stage: Investigation +You are in the INVESTIGATE stage. Your job is to analyze the issue and create an implementation plan. + +{% if issue.state == "Resume" %} +## RESUME CONTEXT +This issue was previously blocked. Check the issue comments for a `## Resume Context` comment explaining what changed. Focus your investigation on the blocking reasons and what has been updated. +{% endif %} + +- Read the codebase to understand existing patterns and architecture +- Identify which files need to change and what the approach should be +- Post a comment on the Linear issue (via `gh`) with your investigation findings and proposed implementation plan +- Do NOT implement code, create branches, or open PRs in this stage — investigation only + +### Workpad (investigate) +After completing your investigation, create the workpad comment on this Linear issue. +**Preferred**: Write the workpad content to a local `workpad.md` file and call `sync_workpad` with `issue_id` and `file_path`. Save the returned `comment_id` for future updates. +**Fallback** (if `sync_workpad` is unavailable): +1. First, search for an existing workpad comment using `linear_graphql`: + ```graphql + query { issue(id: "{{ issue.id }}") { comments { nodes { id body } } } } + ``` + Look for a comment whose body starts with `## Workpad`. +2. If no workpad comment exists, create one using `commentCreate`. If one exists, update it using `commentUpdate`. +3. Use this template for the workpad body: + ``` + ## Workpad + **Environment**: :@ + + ### Plan + - [ ] Step 1 derived from issue description + - [ ] Step 2 ... + - [ ] Substep if needed + + ### Acceptance Criteria + - [ ] Criterion from issue requirements + - [ ] ... + + ### Validation + - `` + - `` + + ### Notes + - Investigation complete. Plan posted. 
+ + ### Confusions + (Only add this section if something in the issue was genuinely unclear.) + ``` +4. Fill the Plan and Acceptance Criteria sections from your investigation findings. + +## Investigation Brief + +After posting the workpad, write `INVESTIGATION-BRIEF.md` to the worktree root. This file gives the implement-stage agent a concise orientation without re-reading the codebase. + +Keep the brief under ~200 lines (~4K tokens). Use exactly this structure: + +```markdown +# Investigation Brief +## Issue: [ISSUE-KEY] — [Title] + +## Objective +One-paragraph summary of what needs to be done and why. + +## Relevant Files (ranked by importance) +1. `src/path/to/primary-file.ts` — Main file to modify. [What it does, why it matters] +2. `src/path/to/secondary-file.ts` — Related dependency. [What to know] +3. `tests/path/to/test-file.test.ts` — Existing tests. [Coverage notes] + +## Key Code Patterns +- Pattern X is used for Y (see `file.ts:42-67`) +- The codebase uses Z convention for this type of change + +## Architecture Context +- Brief description of relevant subsystem +- Data flow: A → B → C +- Key interfaces/types to be aware of + +## Test Strategy +- Existing test files and what they cover +- Test patterns used (describe/it, vitest, mocking approach) +- Edge cases to cover + +## Gotchas & Constraints +- Don't modify X because Y +- Z is deprecated, use W instead + +## Key Code Excerpts +[2-3 most important code blocks with file path and line numbers] +``` + +## Completion Signals +When you are done: +- If investigation is complete and workpad is posted: output `[STAGE_COMPLETE]` +- If the spec is ambiguous or contradictory: output `[STAGE_FAILED: spec]` with an explanation +- If you hit infrastructure issues (API limits, network errors): output `[STAGE_FAILED: infra]` with details +{% endif %} + +{% if stageName == "implement" %} +## Stage: Implementation +You are in the IMPLEMENT stage. Read INVESTIGATION-BRIEF.md first if it exists in the worktree root. 
It contains targeted findings from the investigation stage including relevant files, code patterns, architecture context, and test strategy. Use it to skip codebase exploration and go straight to implementation. If the file does not exist, fall back to reading issue comments for the investigation plan.
+
+{% if reworkCount > 0 %}
+## REWORK ATTEMPT {{ reworkCount }}
+This is a rework attempt. Read ALL comments on this Linear issue starting with `## Review Findings`. These contain the specific findings you must fix.
+- Fix ONLY the identified findings
+- Do not modify code outside the affected files unless strictly necessary
+- Do not reinterpret the spec
+- If a finding conflicts with the spec, output `[STAGE_FAILED: spec]` with an explanation
+{% endif %}
+
+## Implementation Steps
+
+1. Read any investigation notes from previous comments on this issue.
+2. Create a feature branch from the issue's suggested branch name{% if issue.branch_name %} (`{{ issue.branch_name }}`){% endif %}, or use `{{ issue.identifier | downcase }}/<short-description>`.
+3. Implement the task per the issue description.
+4. Write tests as needed.
+5. Run all `# Verify:` commands from the spec. You are not done until every verify command exits 0.
+6. Before creating the PR, capture structured tool output:
+   - Run `npx tsc --noEmit 2>&1` and include output in PR body under `## Tool Output > TypeScript`
+   - Run `npm test 2>&1` and include summary in PR body under `## Tool Output > Tests`
+   - Run `semgrep scan --config auto --json 2>&1` (if available) and include raw output in PR body under `## SAST Output`
+   - Do NOT filter or interpret SAST results — include them verbatim.
+7. Commit your changes with message format: `feat({{ issue.identifier }}): <summary>`.
+8. Open a PR targeting this repo (not its upstream fork parent) via `gh pr create --repo $(git remote get-url origin | sed "s|.*github.com/||;s|\.git$||")` with the issue description in the PR body. Include the Tool Output and SAST Output sections.
+9. 
Link the PR to the Linear issue by including `{{ issue.identifier }}` in the PR title or body. + +### Workpad (implement) +Update the workpad comment at these milestones during implementation. +**Preferred**: Edit your local `workpad.md` file and call `sync_workpad` with `issue_id`, `file_path`, and `comment_id` (from the investigate stage). +**Fallback** (if `sync_workpad` is unavailable): +1. Search for the existing workpad comment (body starts with `## Workpad`) using `linear_graphql`: + ```graphql + query { issue(id: "{{ issue.id }}") { comments { nodes { id body } } } } + ``` +2. Update it using `commentUpdate` with the comment's `id`. +3. At each milestone, update the relevant sections: + - **After starting implementation**: Check off Plan items as you complete them. + - **After implementation is done**: Add a Notes entry (e.g., `- Implementation complete. PR # opened.`), update Validation with actual commands run. + - **After all tests pass**: Check off Acceptance Criteria items, add a Notes entry confirming validation. +4. Do NOT update the workpad after every small code change — only at the milestones above. +5. If no workpad comment exists (e.g., investigation stage was skipped), create one using the template from the investigate stage instructions. + +10. **If your changes are app-touching** (UI, API responses visible to users, frontend assets), capture a screenshot after validation passes and embed it in the workpad: + - Take a screenshot (e.g., `npx playwright screenshot` or `curl` the endpoint and save the response). + - Upload it using the fileUpload flow described in the **Media in Workpads** section. + - Add the image to the workpad comment under Notes: `![screenshot after validation](assetUrl)`. + - Skip this step for non-visual changes (library code, configs, internal refactors). 
+ +## Completion Signals +When you are done: +- If all verify commands pass and PR is created: output `[STAGE_COMPLETE]` +- If you cannot resolve a verify failure after 3 attempts: output `[STAGE_FAILED: verify]` with the failing command and output +- If the spec is ambiguous or contradictory: output `[STAGE_FAILED: spec]` with an explanation +- If you hit infrastructure issues (API limits, network errors): output `[STAGE_FAILED: infra]` with details +{% endif %} + +{% if stageName == "review" %} +## Stage: Review +You are a review agent. Load and execute the /pipeline-review skill. + +The PR for this issue is on the current branch. The issue description contains the frozen spec. The PR body contains Tool Output and SAST Output sections from the implementation agent. + +If all findings are clean or only P3/theoretical: output `[STAGE_COMPLETE]` +If surviving P1/P2 findings exist: post them as a `## Review Findings` comment on the Linear issue, then output `[STAGE_FAILED: review]` with a one-line summary. +{% endif %} + +{% if stageName == "merge" %} +## Stage: Merge +You are in the MERGE stage. The PR has been reviewed and approved. +- Merge the PR via `gh pr merge --squash --delete-branch --repo $(git remote get-url origin | sed "s|.*github.com/||;s|\.git$||")` +- Verify the merge succeeded on the main branch +- Do NOT modify code in this stage + +### Workpad (merge) +After merging the PR, update the workpad comment one final time. +**Preferred**: Edit your local `workpad.md` file and call `sync_workpad` with `issue_id`, `file_path`, and `comment_id`. +**Fallback** (if `sync_workpad` is unavailable): +1. Search for the existing workpad comment (body starts with `## Workpad`) using `linear_graphql`: + ```graphql + query { issue(id: "{{ issue.id }}") { comments { nodes { id body } } } } + ``` +2. Update it using `commentUpdate`: + - Check off all remaining Plan and Acceptance Criteria items. + - Add a final Notes entry: `- PR merged. 
Issue complete.`
+
+- When you have successfully merged the PR, output the exact text `[STAGE_COMPLETE]` as the very last line of your final message.
+{% endif %}
+
+## Scope Discipline
+
+- If your task requires a capability that doesn't exist in the codebase and isn't specified in the spec, stop and comment what's missing on the issue. Don't scaffold unspecced infrastructure.
+- Tests must be runnable against $BASE_URL (no localhost assumptions in committed tests).
+
+## Workpad Rules
+
+You maintain a single persistent `## Workpad` comment on the Linear issue. This is your structured progress document.
+
+**Critical rules:**
+- **Never create multiple workpad comments.** Always search for an existing comment with `## Workpad` in its body before creating a new one.
+- **Update at milestones only** — plan finalized, implementation done, validation complete. Do NOT sync after every minor change.
+- **Prefer `sync_workpad` over raw GraphQL.** Write your workpad content to a local `workpad.md` file, then call `sync_workpad` with `issue_id`, `file_path`, and optionally `comment_id` (returned from the first sync). This keeps the workpad body out of your conversation context and saves tokens. Fall back to `linear_graphql` only if `sync_workpad` is unavailable.
+- **`linear_graphql` fallback patterns** (use only if `sync_workpad` is unavailable):
+  - Search comments: `query { issue(id: "<issue-id>") { comments { nodes { id body } } } }`
+  - Create comment: `mutation { commentCreate(input: { issueId: "<issue-id>", body: "<markdown-body>" }) { comment { id } } }`
+  - Update comment: `mutation { commentUpdate(id: "<comment-id>", input: { body: "<markdown-body>" }) { comment { id } } }`
+- **Never use `__type` or `__schema` introspection queries** against the Linear API. Use the exact patterns above.
+
+## Media in Workpads (fileUpload)
+
+When you capture evidence (screenshots, recordings, logs) during implementation, embed them in the workpad using Linear's `fileUpload` API. 
This is a 3-step flow:
+
+**Step 1: Get upload URL** via `linear_graphql`:
+```graphql
+mutation($filename: String!, $contentType: String!, $size: Int!) {
+  fileUpload(filename: $filename, contentType: $contentType, size: $size, makePublic: true) {
+    success
+    uploadFile { uploadUrl assetUrl headers { key value } }
+  }
+}
+```
+
+**Step 2: Upload file bytes** using `curl`:
+```bash
+# Build header flags from the returned headers array
+curl -X PUT -H "Content-Type: <contentType>" \
+  -H "<header1.key>: <header1.value>" -H "<header2.key>: <header2.value>" \
+  --data-binary @<file-path> "<uploadUrl>"
+```
+
+**Step 3: Embed in workpad** — add `![description](assetUrl)` to the workpad comment body (either via `sync_workpad` or `commentUpdate`).
+
+**Supported content types**: `image/png`, `image/jpeg`, `image/gif`, `video/mp4`, `application/pdf`.
+
+**When to capture media**: Only when evidence adds value — screenshots of UI changes, recordings of interaction flows, or error screenshots for debugging. Do not upload media for non-visual tasks (e.g., pure API or library changes).
+
+## Documentation Maintenance
+
+- If you add a new module, API endpoint, or significant abstraction, update the relevant docs/ file and the AGENTS.md Documentation Map entry. If no relevant doc exists, create one following the docs/ conventions (# Title, > Last updated header).
+- If a docs/ file you reference during implementation is stale or missing, update/create it as part of your implementation. Include the update in the same PR as your code changes — never in a separate PR.
+- If you make a non-obvious architectural decision during implementation, create a design doc in docs/design-docs/ following the ADR format (numbered, with Status line). Add it to the AGENTS.md design docs table.
+- When you complete your implementation, update the > Last updated date on any docs/ file you modified.
+- Do not update docs/generated/ files — those are auto-generated and will be overwritten.
+- Commit doc updates in the same PR as code changes, not separately.
diff --git a/pipeline-config/workflows/WORKFLOW-hs-mobile.md b/pipeline-config/workflows/WORKFLOW-hs-mobile.md new file mode 100644 index 00000000..40a854a2 --- /dev/null +++ b/pipeline-config/workflows/WORKFLOW-hs-mobile.md @@ -0,0 +1,458 @@ +--- +tracker: + kind: linear + api_key: $LINEAR_API_KEY + project_slug: a1f2d91e6868 + active_states: + - Todo + - In Progress + - In Review + - Blocked + - Resume + terminal_states: + - Done + - Cancelled + +escalation_state: Blocked + +polling: + interval_ms: 30000 + +workspace: + root: ./workspaces + +agent: + max_concurrent_agents: 1 + max_turns: 30 + max_retry_backoff_ms: 300000 + +codex: + stall_timeout_ms: 1800000 + +runner: + kind: claude-code + model: claude-sonnet-4-5 + +hooks: + after_create: | + set -euo pipefail + if [ -z "${REPO_URL:-}" ]; then + echo "ERROR: REPO_URL environment variable is not set" >&2 + exit 1 + fi + echo "Cloning $REPO_URL into workspace..." + git clone --depth 1 "$REPO_URL" . + if [ -f package.json ]; then + if [ -f bun.lock ]; then + bun install --frozen-lockfile + elif [ -f pnpm-lock.yaml ]; then + pnpm install --frozen-lockfile + elif [ -f yarn.lock ]; then + yarn install --frozen-lockfile + else + npm install + fi + fi + # --- Build code graph (best-effort) --- + if command -v code-review-graph >/dev/null 2>&1; then + echo "Building code review graph..." + code-review-graph build --repo . || echo "WARNING: code-review-graph build failed, continuing without graph" >&2 + else + echo "WARNING: code-review-graph not installed, skipping graph build" >&2 + fi + echo "Workspace setup complete." + before_run: | + set -euo pipefail + echo "Syncing workspace with upstream..." + + # --- Git lock handling --- + wait_for_git_lock() { + local attempt=0 + while [ -f .git/index.lock ] && [ $attempt -lt 6 ]; do + echo "WARNING: .git/index.lock exists, waiting 5s (attempt $((attempt+1))/6)..." 
>&2 + sleep 5 + attempt=$((attempt+1)) + done + if [ -f .git/index.lock ]; then + echo "WARNING: .git/index.lock still exists after 30s, removing stale lock" >&2 + rm -f .git/index.lock + fi + } + + # --- Git fetch with retry --- + fetch_ok=false + for attempt in 1 2 3; do + wait_for_git_lock + if git fetch origin 2>/dev/null; then + fetch_ok=true + break + fi + echo "WARNING: git fetch failed (attempt $attempt/3), retrying in 2s..." >&2 + sleep 2 + done + if [ "$fetch_ok" = false ]; then + echo "WARNING: git fetch failed after 3 attempts, continuing with stale refs" >&2 + fi + + # --- Rebase (best-effort) --- + CURRENT_BRANCH=$(git rev-parse --abbrev-ref HEAD 2>/dev/null || echo "unknown") + if [ "$CURRENT_BRANCH" = "main" ] || [ "$CURRENT_BRANCH" = "master" ]; then + echo "On $CURRENT_BRANCH — rebasing onto latest..." + wait_for_git_lock + if ! git rebase "origin/$CURRENT_BRANCH" 2>/dev/null; then + echo "WARNING: Rebase failed, aborting rebase" >&2 + git rebase --abort 2>/dev/null || true + fi + else + echo "On feature branch $CURRENT_BRANCH — skipping rebase, fetch only." + fi + # Import investigation brief into CLAUDE.md if it exists + if [ -f "INVESTIGATION-BRIEF.md" ]; then + if ! grep -q "@INVESTIGATION-BRIEF.md" CLAUDE.md 2>/dev/null; then + echo '' >> CLAUDE.md + echo '@INVESTIGATION-BRIEF.md' >> CLAUDE.md + fi + fi + echo "Workspace synced." + before_remove: | + set -uo pipefail + BRANCH=$(git rev-parse --abbrev-ref HEAD 2>/dev/null || echo "") + if [ -z "$BRANCH" ] || [ "$BRANCH" = "main" ] || [ "$BRANCH" = "master" ] || [ "$BRANCH" = "HEAD" ]; then + exit 0 + fi + echo "Cleaning up branch $BRANCH..." + # Close any open PR for this branch (also deletes the remote branch via --delete-branch) + PR_NUM=$(gh pr list --head "$BRANCH" --state open --json number --jq '.[0].number' 2>/dev/null || echo "") + if [ -n "$PR_NUM" ]; then + echo "Closing PR #$PR_NUM and deleting remote branch..." 
+ gh pr close "$PR_NUM" --delete-branch 2>/dev/null || true + else + # No open PR — just delete the remote branch if it exists + echo "No open PR found, deleting remote branch..." + git push origin --delete "$BRANCH" 2>/dev/null || true + fi + echo "Cleanup complete." + timeout_ms: 120000 + +server: + port: 4325 + +observability: + dashboard_enabled: true + refresh_ms: 5000 + +stages: + initial_stage: investigate + + investigate: + type: agent + runner: claude-code + model: claude-sonnet-4-5 + max_turns: 8 + linear_state: In Progress + mcp_servers: + code-review-graph: + command: uvx + args: + - code-review-graph + - serve + on_complete: implement + + implement: + type: agent + runner: claude-code + model: claude-sonnet-4-5 + max_turns: 30 + mcp_servers: + code-review-graph: + command: uvx + args: + - code-review-graph + - serve + on_complete: review + + review: + type: agent + runner: claude-code + model: claude-opus-4-6 + max_turns: 15 + max_rework: 3 + linear_state: In Review + on_complete: merge + on_rework: implement + + merge: + type: agent + runner: claude-code + model: claude-sonnet-4-5 + max_turns: 5 + on_complete: done + + done: + type: terminal + linear_state: Done +--- + +You are running in headless/unattended mode. Do NOT use interactive skills, slash commands, or plan mode. Do not prompt for user input. Complete your work autonomously. + +You are working on the HS Mobile product. + +Implement only what your task specifies. If you encounter missing functionality that another task covers, add a TODO comment rather than implementing it. Do not refactor surrounding code or add unsolicited improvements. + +Never hardcode localhost or 127.0.0.1. Use the $BASE_URL environment variable for all URL references. Set BASE_URL=localhost: during local development. + +# {{ issue.identifier }} — {{ issue.title }} + +You are working on Linear issue {{ issue.identifier }}. 
+ +## Issue Description + +{{ issue.description }} + +{% if issue.labels.size > 0 %} +Labels: {{ issue.labels | join: ", " }} +{% endif %} + +{% if stageName == "investigate" %} +## Stage: Investigation +You are in the INVESTIGATE stage. Your job is to analyze the issue and create an implementation plan. + +{% if issue.state == "Resume" %} +## RESUME CONTEXT +This issue was previously blocked. Check the issue comments for a `## Resume Context` comment explaining what changed. Focus your investigation on the blocking reasons and what has been updated. +{% endif %} + +- Read the codebase to understand existing patterns and architecture +- Identify which files need to change and what the approach should be +- Post a comment on the Linear issue (via `gh`) with your investigation findings and proposed implementation plan +- Do NOT implement code, create branches, or open PRs in this stage — investigation only + +### Workpad (investigate) +After completing your investigation, create the workpad comment on this Linear issue. +**Preferred**: Write the workpad content to a local `workpad.md` file and call `sync_workpad` with `issue_id` and `file_path`. Save the returned `comment_id` for future updates. +**Fallback** (if `sync_workpad` is unavailable): +1. First, search for an existing workpad comment using `linear_graphql`: + ```graphql + query { issue(id: "{{ issue.id }}") { comments { nodes { id body } } } } + ``` + Look for a comment whose body starts with `## Workpad`. +2. If no workpad comment exists, create one using `commentCreate`. If one exists, update it using `commentUpdate`. +3. Use this template for the workpad body: + ``` + ## Workpad + **Environment**: :@ + + ### Plan + - [ ] Step 1 derived from issue description + - [ ] Step 2 ... + - [ ] Substep if needed + + ### Acceptance Criteria + - [ ] Criterion from issue requirements + - [ ] ... + + ### Validation + - `` + - `` + + ### Notes + - Investigation complete. Plan posted. 
+ + ### Confusions + (Only add this section if something in the issue was genuinely unclear.) + ``` +4. Fill the Plan and Acceptance Criteria sections from your investigation findings. + +## Investigation Brief + +After posting the workpad, write `INVESTIGATION-BRIEF.md` to the worktree root. This file gives the implement-stage agent a concise orientation without re-reading the codebase. + +Keep the brief under ~200 lines (~4K tokens). Use exactly this structure: + +```markdown +# Investigation Brief +## Issue: [ISSUE-KEY] — [Title] + +## Objective +One-paragraph summary of what needs to be done and why. + +## Relevant Files (ranked by importance) +1. `src/path/to/primary-file.ts` — Main file to modify. [What it does, why it matters] +2. `src/path/to/secondary-file.ts` — Related dependency. [What to know] +3. `tests/path/to/test-file.test.ts` — Existing tests. [Coverage notes] + +## Key Code Patterns +- Pattern X is used for Y (see `file.ts:42-67`) +- The codebase uses Z convention for this type of change + +## Architecture Context +- Brief description of relevant subsystem +- Data flow: A → B → C +- Key interfaces/types to be aware of + +## Test Strategy +- Existing test files and what they cover +- Test patterns used (describe/it, vitest, mocking approach) +- Edge cases to cover + +## Gotchas & Constraints +- Don't modify X because Y +- Z is deprecated, use W instead + +## Key Code Excerpts +[2-3 most important code blocks with file path and line numbers] +``` + +## Completion Signals +When you are done: +- If investigation is complete and workpad is posted: output `[STAGE_COMPLETE]` +- If the spec is ambiguous or contradictory: output `[STAGE_FAILED: spec]` with an explanation +- If you hit infrastructure issues (API limits, network errors): output `[STAGE_FAILED: infra]` with details +{% endif %} + +{% if stageName == "implement" %} +## Stage: Implementation +You are in the IMPLEMENT stage. Read INVESTIGATION-BRIEF.md first if it exists in the worktree root. 
It contains targeted findings from the investigation stage including relevant files, code patterns, architecture context, and test strategy. Use it to skip codebase exploration and go straight to implementation. If the file does not exist, fall back to reading issue comments for the investigation plan.
+
+{% if reworkCount > 0 %}
+## REWORK ATTEMPT {{ reworkCount }}
+This is a rework attempt. Read ALL comments on this Linear issue starting with `## Review Findings`. These contain the specific findings you must fix.
+- Fix ONLY the identified findings
+- Do not modify code outside the affected files unless strictly necessary
+- Do not reinterpret the spec
+- If a finding conflicts with the spec, output `[STAGE_FAILED: spec]` with an explanation
+{% endif %}
+
+## Implementation Steps
+
+1. Read any investigation notes from previous comments on this issue.
+2. Create a feature branch from the issue's suggested branch name{% if issue.branch_name %} (`{{ issue.branch_name }}`){% endif %}, or use `{{ issue.identifier | downcase }}/<short-description>`.
+3. Implement the task per the issue description.
+4. Write tests as needed.
+5. Run all `# Verify:` commands from the spec. You are not done until every verify command exits 0.
+6. Before creating the PR, capture structured tool output:
+   - Run `npx tsc --noEmit 2>&1` and include output in PR body under `## Tool Output > TypeScript`
+   - Run `npm test 2>&1` and include summary in PR body under `## Tool Output > Tests`
+   - Run `semgrep scan --config auto --json 2>&1` (if available) and include raw output in PR body under `## SAST Output`
+   - Do NOT filter or interpret SAST results — include them verbatim.
+7. Commit your changes with message format: `feat({{ issue.identifier }}): <summary>`.
+8. Open a PR targeting this repo (not its upstream fork parent) via `gh pr create --repo $(git remote get-url origin | sed "s|.*github.com/||;s|\.git$||")` with the issue description in the PR body. Include the Tool Output and SAST Output sections.
+9. 
Link the PR to the Linear issue by including `{{ issue.identifier }}` in the PR title or body. + +### Workpad (implement) +Update the workpad comment at these milestones during implementation. +**Preferred**: Edit your local `workpad.md` file and call `sync_workpad` with `issue_id`, `file_path`, and `comment_id` (from the investigate stage). +**Fallback** (if `sync_workpad` is unavailable): +1. Search for the existing workpad comment (body starts with `## Workpad`) using `linear_graphql`: + ```graphql + query { issue(id: "{{ issue.id }}") { comments { nodes { id body } } } } + ``` +2. Update it using `commentUpdate` with the comment's `id`. +3. At each milestone, update the relevant sections: + - **After starting implementation**: Check off Plan items as you complete them. + - **After implementation is done**: Add a Notes entry (e.g., `- Implementation complete. PR # opened.`), update Validation with actual commands run. + - **After all tests pass**: Check off Acceptance Criteria items, add a Notes entry confirming validation. +4. Do NOT update the workpad after every small code change — only at the milestones above. +5. If no workpad comment exists (e.g., investigation stage was skipped), create one using the template from the investigate stage instructions. + +10. **If your changes are app-touching** (UI, API responses visible to users, frontend assets), capture a screenshot after validation passes and embed it in the workpad: + - Take a screenshot (e.g., `npx playwright screenshot` or `curl` the endpoint and save the response). + - Upload it using the fileUpload flow described in the **Media in Workpads** section. + - Add the image to the workpad comment under Notes: `![screenshot after validation](assetUrl)`. + - Skip this step for non-visual changes (library code, configs, internal refactors). 
+ +## Completion Signals +When you are done: +- If all verify commands pass and PR is created: output `[STAGE_COMPLETE]` +- If you cannot resolve a verify failure after 3 attempts: output `[STAGE_FAILED: verify]` with the failing command and output +- If the spec is ambiguous or contradictory: output `[STAGE_FAILED: spec]` with an explanation +- If you hit infrastructure issues (API limits, network errors): output `[STAGE_FAILED: infra]` with details +{% endif %} + +{% if stageName == "review" %} +## Stage: Review +You are a review agent. Load and execute the /pipeline-review skill. + +The PR for this issue is on the current branch. The issue description contains the frozen spec. The PR body contains Tool Output and SAST Output sections from the implementation agent. + +If all findings are clean or only P3/theoretical: output `[STAGE_COMPLETE]` +If surviving P1/P2 findings exist: post them as a `## Review Findings` comment on the Linear issue, then output `[STAGE_FAILED: review]` with a one-line summary. +{% endif %} + +{% if stageName == "merge" %} +## Stage: Merge +You are in the MERGE stage. The PR has been reviewed and approved. +- Merge the PR via `gh pr merge --squash --delete-branch --repo $(git remote get-url origin | sed "s|.*github.com/||;s|\.git$||")` +- Verify the merge succeeded on the main branch +- Do NOT modify code in this stage + +### Workpad (merge) +After merging the PR, update the workpad comment one final time. +**Preferred**: Edit your local `workpad.md` file and call `sync_workpad` with `issue_id`, `file_path`, and `comment_id`. +**Fallback** (if `sync_workpad` is unavailable): +1. Search for the existing workpad comment (body starts with `## Workpad`) using `linear_graphql`: + ```graphql + query { issue(id: "{{ issue.id }}") { comments { nodes { id body } } } } + ``` +2. Update it using `commentUpdate`: + - Check off all remaining Plan and Acceptance Criteria items. + - Add a final Notes entry: `- PR merged. 
Issue complete.`
+
+- When you have successfully merged the PR, output the exact text `[STAGE_COMPLETE]` as the very last line of your final message.
+{% endif %}
+
+## Scope Discipline
+
+- If your task requires a capability that doesn't exist in the codebase and isn't specified in the spec, stop and comment what's missing on the issue. Don't scaffold unspecced infrastructure.
+- Tests must be runnable against $BASE_URL (no localhost assumptions in committed tests).
+
+## Workpad Rules
+
+You maintain a single persistent `## Workpad` comment on the Linear issue. This is your structured progress document.
+
+**Critical rules:**
+- **Never create multiple workpad comments.** Always search for an existing comment with `## Workpad` in its body before creating a new one.
+- **Update at milestones only** — plan finalized, implementation done, validation complete. Do NOT sync after every minor change.
+- **Prefer `sync_workpad` over raw GraphQL.** Write your workpad content to a local `workpad.md` file, then call `sync_workpad` with `issue_id`, `file_path`, and optionally `comment_id` (returned from the first sync). This keeps the workpad body out of your conversation context and saves tokens. Fall back to `linear_graphql` only if `sync_workpad` is unavailable.
+- **`linear_graphql` fallback patterns** (use only if `sync_workpad` is unavailable):
+  - Search comments: `query { issue(id: "<issue-id>") { comments { nodes { id body } } } }`
+  - Create comment: `mutation { commentCreate(input: { issueId: "<issue-id>", body: "<markdown-body>" }) { comment { id } } }`
+  - Update comment: `mutation { commentUpdate(id: "<comment-id>", input: { body: "<markdown-body>" }) { comment { id } } }`
+- **Never use `__type` or `__schema` introspection queries** against the Linear API. Use the exact patterns above.
+
+## Media in Workpads (fileUpload)
+
+When you capture evidence (screenshots, recordings, logs) during implementation, embed them in the workpad using Linear's `fileUpload` API. 
This is a 3-step flow:
+
+**Step 1: Get upload URL** via `linear_graphql`:
+```graphql
+mutation($filename: String!, $contentType: String!, $size: Int!) {
+  fileUpload(filename: $filename, contentType: $contentType, size: $size, makePublic: true) {
+    success
+    uploadFile { uploadUrl assetUrl headers { key value } }
+  }
+}
+```
+
+**Step 2: Upload file bytes** using `curl`:
+```bash
+# Build header flags from the returned headers array
+curl -X PUT -H "Content-Type: <contentType>" \
+  -H "<header1.key>: <header1.value>" -H "<header2.key>: <header2.value>" \
+  --data-binary @<file-path> "<uploadUrl>"
+```
+
+**Step 3: Embed in workpad** — add `![description](assetUrl)` to the workpad comment body (either via `sync_workpad` or `commentUpdate`).
+
+**Supported content types**: `image/png`, `image/jpeg`, `image/gif`, `video/mp4`, `application/pdf`.
+
+**When to capture media**: Only when evidence adds value — screenshots of UI changes, recordings of interaction flows, or error screenshots for debugging. Do not upload media for non-visual tasks (e.g., pure API or library changes).
+
+## Documentation Maintenance
+
+- If you add a new module, API endpoint, or significant abstraction, update the relevant docs/ file and the AGENTS.md Documentation Map entry. If no relevant doc exists, create one following the docs/ conventions (# Title, > Last updated header).
+- If a docs/ file you reference during implementation is stale or missing, update/create it as part of your implementation. Include the update in the same PR as your code changes — never in a separate PR.
+- If you make a non-obvious architectural decision during implementation, create a design doc in docs/design-docs/ following the ADR format (numbered, with Status line). Add it to the AGENTS.md design docs table.
+- When you complete your implementation, update the > Last updated date on any docs/ file you modified.
+- Do not update docs/generated/ files — those are auto-generated and will be overwritten.
+- Commit doc updates in the same PR as code changes, not separately.
diff --git a/pipeline-config/workflows/WORKFLOW-hs-ui.md b/pipeline-config/workflows/WORKFLOW-hs-ui.md new file mode 100644 index 00000000..e4385e78 --- /dev/null +++ b/pipeline-config/workflows/WORKFLOW-hs-ui.md @@ -0,0 +1,458 @@ +--- +tracker: + kind: linear + api_key: $LINEAR_API_KEY + project_slug: b42a45f6c63e + active_states: + - Todo + - In Progress + - In Review + - Blocked + - Resume + terminal_states: + - Done + - Cancelled + +escalation_state: Blocked + +polling: + interval_ms: 30000 + +workspace: + root: ./workspaces + +agent: + max_concurrent_agents: 1 + max_turns: 30 + max_retry_backoff_ms: 300000 + +codex: + stall_timeout_ms: 1800000 + +runner: + kind: claude-code + model: claude-sonnet-4-5 + +hooks: + after_create: | + set -euo pipefail + if [ -z "${REPO_URL:-}" ]; then + echo "ERROR: REPO_URL environment variable is not set" >&2 + exit 1 + fi + echo "Cloning $REPO_URL into workspace..." + git clone --depth 1 "$REPO_URL" . + if [ -f package.json ]; then + if [ -f bun.lock ]; then + bun install --frozen-lockfile + elif [ -f pnpm-lock.yaml ]; then + pnpm install --frozen-lockfile + elif [ -f yarn.lock ]; then + yarn install --frozen-lockfile + else + npm install + fi + fi + # --- Build code graph (best-effort) --- + if command -v code-review-graph >/dev/null 2>&1; then + echo "Building code review graph..." + code-review-graph build --repo . || echo "WARNING: code-review-graph build failed, continuing without graph" >&2 + else + echo "WARNING: code-review-graph not installed, skipping graph build" >&2 + fi + echo "Workspace setup complete." + before_run: | + set -euo pipefail + echo "Syncing workspace with upstream..." + + # --- Git lock handling --- + wait_for_git_lock() { + local attempt=0 + while [ -f .git/index.lock ] && [ $attempt -lt 6 ]; do + echo "WARNING: .git/index.lock exists, waiting 5s (attempt $((attempt+1))/6)..." 
>&2 + sleep 5 + attempt=$((attempt+1)) + done + if [ -f .git/index.lock ]; then + echo "WARNING: .git/index.lock still exists after 30s, removing stale lock" >&2 + rm -f .git/index.lock + fi + } + + # --- Git fetch with retry --- + fetch_ok=false + for attempt in 1 2 3; do + wait_for_git_lock + if git fetch origin 2>/dev/null; then + fetch_ok=true + break + fi + echo "WARNING: git fetch failed (attempt $attempt/3), retrying in 2s..." >&2 + sleep 2 + done + if [ "$fetch_ok" = false ]; then + echo "WARNING: git fetch failed after 3 attempts, continuing with stale refs" >&2 + fi + + # --- Rebase (best-effort) --- + CURRENT_BRANCH=$(git rev-parse --abbrev-ref HEAD 2>/dev/null || echo "unknown") + if [ "$CURRENT_BRANCH" = "main" ] || [ "$CURRENT_BRANCH" = "master" ]; then + echo "On $CURRENT_BRANCH — rebasing onto latest..." + wait_for_git_lock + if ! git rebase "origin/$CURRENT_BRANCH" 2>/dev/null; then + echo "WARNING: Rebase failed, aborting rebase" >&2 + git rebase --abort 2>/dev/null || true + fi + else + echo "On feature branch $CURRENT_BRANCH — skipping rebase, fetch only." + fi + # Import investigation brief into CLAUDE.md if it exists + if [ -f "INVESTIGATION-BRIEF.md" ]; then + if ! grep -q "@INVESTIGATION-BRIEF.md" CLAUDE.md 2>/dev/null; then + echo '' >> CLAUDE.md + echo '@INVESTIGATION-BRIEF.md' >> CLAUDE.md + fi + fi + echo "Workspace synced." + before_remove: | + set -uo pipefail + BRANCH=$(git rev-parse --abbrev-ref HEAD 2>/dev/null || echo "") + if [ -z "$BRANCH" ] || [ "$BRANCH" = "main" ] || [ "$BRANCH" = "master" ] || [ "$BRANCH" = "HEAD" ]; then + exit 0 + fi + echo "Cleaning up branch $BRANCH..." + # Close any open PR for this branch (also deletes the remote branch via --delete-branch) + PR_NUM=$(gh pr list --head "$BRANCH" --state open --json number --jq '.[0].number' 2>/dev/null || echo "") + if [ -n "$PR_NUM" ]; then + echo "Closing PR #$PR_NUM and deleting remote branch..." 
+ gh pr close "$PR_NUM" --delete-branch 2>/dev/null || true + else + # No open PR — just delete the remote branch if it exists + echo "No open PR found, deleting remote branch..." + git push origin --delete "$BRANCH" 2>/dev/null || true + fi + echo "Cleanup complete." + timeout_ms: 120000 + +server: + port: 4324 + +observability: + dashboard_enabled: true + refresh_ms: 5000 + +stages: + initial_stage: investigate + + investigate: + type: agent + runner: claude-code + model: claude-sonnet-4-5 + max_turns: 8 + linear_state: In Progress + mcp_servers: + code-review-graph: + command: uvx + args: + - code-review-graph + - serve + on_complete: implement + + implement: + type: agent + runner: claude-code + model: claude-sonnet-4-5 + max_turns: 30 + mcp_servers: + code-review-graph: + command: uvx + args: + - code-review-graph + - serve + on_complete: review + + review: + type: agent + runner: claude-code + model: claude-opus-4-6 + max_turns: 15 + max_rework: 3 + linear_state: In Review + on_complete: merge + on_rework: implement + + merge: + type: agent + runner: claude-code + model: claude-sonnet-4-5 + max_turns: 5 + on_complete: done + + done: + type: terminal + linear_state: Done +--- + +You are running in headless/unattended mode. Do NOT use interactive skills, slash commands, or plan mode. Do not prompt for user input. Complete your work autonomously. + +You are working on the HS UI product. + +Implement only what your task specifies. If you encounter missing functionality that another task covers, add a TODO comment rather than implementing it. Do not refactor surrounding code or add unsolicited improvements. + +Never hardcode localhost or 127.0.0.1. Use the $BASE_URL environment variable for all URL references. Set BASE_URL=localhost: during local development. + +# {{ issue.identifier }} — {{ issue.title }} + +You are working on Linear issue {{ issue.identifier }}. 
+ +## Issue Description + +{{ issue.description }} + +{% if issue.labels.size > 0 %} +Labels: {{ issue.labels | join: ", " }} +{% endif %} + +{% if stageName == "investigate" %} +## Stage: Investigation +You are in the INVESTIGATE stage. Your job is to analyze the issue and create an implementation plan. + +{% if issue.state == "Resume" %} +## RESUME CONTEXT +This issue was previously blocked. Check the issue comments for a `## Resume Context` comment explaining what changed. Focus your investigation on the blocking reasons and what has been updated. +{% endif %} + +- Read the codebase to understand existing patterns and architecture +- Identify which files need to change and what the approach should be +- Post a comment on the Linear issue (via `linear_graphql`) with your investigation findings and proposed implementation plan +- Do NOT implement code, create branches, or open PRs in this stage — investigation only + +### Workpad (investigate) +After completing your investigation, create the workpad comment on this Linear issue. +**Preferred**: Write the workpad content to a local `workpad.md` file and call `sync_workpad` with `issue_id` and `file_path`. Save the returned `comment_id` for future updates. +**Fallback** (if `sync_workpad` is unavailable): +1. First, search for an existing workpad comment using `linear_graphql`: + ```graphql + query { issue(id: "{{ issue.id }}") { comments { nodes { id body } } } } + ``` + Look for a comment whose body starts with `## Workpad`. +2. If no workpad comment exists, create one using `commentCreate`. If one exists, update it using `commentUpdate`. +3. Use this template for the workpad body: + ``` + ## Workpad + **Environment**: <branch>:<commit>@<base-url> + + ### Plan + - [ ] Step 1 derived from issue description + - [ ] Step 2 ... + - [ ] Substep if needed + + ### Acceptance Criteria + - [ ] Criterion from issue requirements + - [ ] ... + + ### Validation + - `<verify command>` + - `<verify command>` + + ### Notes + - Investigation complete. Plan posted.
+ + ### Confusions + (Only add this section if something in the issue was genuinely unclear.) + ``` +4. Fill the Plan and Acceptance Criteria sections from your investigation findings. + +## Investigation Brief + +After posting the workpad, write `INVESTIGATION-BRIEF.md` to the worktree root. This file gives the implement-stage agent a concise orientation without re-reading the codebase. + +Keep the brief under ~200 lines (~4K tokens). Use exactly this structure: + +```markdown +# Investigation Brief +## Issue: [ISSUE-KEY] — [Title] + +## Objective +One-paragraph summary of what needs to be done and why. + +## Relevant Files (ranked by importance) +1. `src/path/to/primary-file.ts` — Main file to modify. [What it does, why it matters] +2. `src/path/to/secondary-file.ts` — Related dependency. [What to know] +3. `tests/path/to/test-file.test.ts` — Existing tests. [Coverage notes] + +## Key Code Patterns +- Pattern X is used for Y (see `file.ts:42-67`) +- The codebase uses Z convention for this type of change + +## Architecture Context +- Brief description of relevant subsystem +- Data flow: A → B → C +- Key interfaces/types to be aware of + +## Test Strategy +- Existing test files and what they cover +- Test patterns used (describe/it, vitest, mocking approach) +- Edge cases to cover + +## Gotchas & Constraints +- Don't modify X because Y +- Z is deprecated, use W instead + +## Key Code Excerpts +[2-3 most important code blocks with file path and line numbers] +``` + +## Completion Signals +When you are done: +- If investigation is complete and workpad is posted: output `[STAGE_COMPLETE]` +- If the spec is ambiguous or contradictory: output `[STAGE_FAILED: spec]` with an explanation +- If you hit infrastructure issues (API limits, network errors): output `[STAGE_FAILED: infra]` with details +{% endif %} + +{% if stageName == "implement" %} +## Stage: Implementation +You are in the IMPLEMENT stage. Read INVESTIGATION-BRIEF.md first if it exists in the worktree root. 
It contains targeted findings from the investigation stage including relevant files, code patterns, architecture context, and test strategy. Use it to skip codebase exploration and go straight to implementation. If the file does not exist, fall back to reading issue comments for the investigation plan. + +{% if reworkCount > 0 %} +## REWORK ATTEMPT {{ reworkCount }} +This is a rework attempt. Read ALL comments on this Linear issue starting with `## Review Findings`. These contain the specific findings you must fix. +- Fix ONLY the identified findings +- Do not modify code outside the affected files unless strictly necessary +- Do not reinterpret the spec +- If a finding conflicts with the spec, output `[STAGE_FAILED: spec]` with an explanation +{% endif %} + +## Implementation Steps + +1. Read any investigation notes from previous comments on this issue. +2. Create a feature branch from the issue's suggested branch name{% if issue.branch_name %} (`{{ issue.branch_name }}`){% endif %}, or use `{{ issue.identifier | downcase }}/`. +3. Implement the task per the issue description. +4. Write tests as needed. +5. Run all `# Verify:` commands from the spec. You are not done until every verify command exits 0. +6. Before creating the PR, capture structured tool output: + - Run `npx tsc --noEmit 2>&1` and include output in PR body under `## Tool Output > TypeScript` + - Run `npm test 2>&1` and include summary in PR body under `## Tool Output > Tests` + - Run `semgrep scan --config auto --json 2>&1` (if available) and include raw output in PR body under `## SAST Output` + - Do NOT filter or interpret SAST results — include them verbatim. +7. Commit your changes with message format: `feat({{ issue.identifier }}): `. +8. Open a PR targeting this repo (not its upstream fork parent) via `gh pr create --repo $(git remote get-url origin | sed "s|.*github.com/||;s|\.git$||")` with the issue description in the PR body. Include the Tool Output and SAST Output sections. +9. 
Link the PR to the Linear issue by including `{{ issue.identifier }}` in the PR title or body. + +### Workpad (implement) +Update the workpad comment at these milestones during implementation. +**Preferred**: Edit your local `workpad.md` file and call `sync_workpad` with `issue_id`, `file_path`, and `comment_id` (from the investigate stage). +**Fallback** (if `sync_workpad` is unavailable): +1. Search for the existing workpad comment (body starts with `## Workpad`) using `linear_graphql`: + ```graphql + query { issue(id: "{{ issue.id }}") { comments { nodes { id body } } } } + ``` +2. Update it using `commentUpdate` with the comment's `id`. +3. At each milestone, update the relevant sections: + - **After starting implementation**: Check off Plan items as you complete them. + - **After implementation is done**: Add a Notes entry (e.g., `- Implementation complete. PR # opened.`), update Validation with actual commands run. + - **After all tests pass**: Check off Acceptance Criteria items, add a Notes entry confirming validation. +4. Do NOT update the workpad after every small code change — only at the milestones above. +5. If no workpad comment exists (e.g., investigation stage was skipped), create one using the template from the investigate stage instructions. + +10. **If your changes are app-touching** (UI, API responses visible to users, frontend assets), capture a screenshot after validation passes and embed it in the workpad: + - Take a screenshot (e.g., `npx playwright screenshot` or `curl` the endpoint and save the response). + - Upload it using the fileUpload flow described in the **Media in Workpads** section. + - Add the image to the workpad comment under Notes: `![screenshot after validation](assetUrl)`. + - Skip this step for non-visual changes (library code, configs, internal refactors). 
+ +## Completion Signals +When you are done: +- If all verify commands pass and PR is created: output `[STAGE_COMPLETE]` +- If you cannot resolve a verify failure after 3 attempts: output `[STAGE_FAILED: verify]` with the failing command and output +- If the spec is ambiguous or contradictory: output `[STAGE_FAILED: spec]` with an explanation +- If you hit infrastructure issues (API limits, network errors): output `[STAGE_FAILED: infra]` with details +{% endif %} + +{% if stageName == "review" %} +## Stage: Review +You are a review agent. Load and execute the /pipeline-review skill. + +The PR for this issue is on the current branch. The issue description contains the frozen spec. The PR body contains Tool Output and SAST Output sections from the implementation agent. + +If all findings are clean or only P3/theoretical: output `[STAGE_COMPLETE]` +If surviving P1/P2 findings exist: post them as a `## Review Findings` comment on the Linear issue, then output `[STAGE_FAILED: review]` with a one-line summary. +{% endif %} + +{% if stageName == "merge" %} +## Stage: Merge +You are in the MERGE stage. The PR has been reviewed and approved. +- Merge the PR via `gh pr merge --squash --delete-branch --repo $(git remote get-url origin | sed "s|.*github.com/||;s|\.git$||")` +- Verify the merge succeeded on the main branch +- Do NOT modify code in this stage + +### Workpad (merge) +After merging the PR, update the workpad comment one final time. +**Preferred**: Edit your local `workpad.md` file and call `sync_workpad` with `issue_id`, `file_path`, and `comment_id`. +**Fallback** (if `sync_workpad` is unavailable): +1. Search for the existing workpad comment (body starts with `## Workpad`) using `linear_graphql`: + ```graphql + query { issue(id: "{{ issue.id }}") { comments { nodes { id body } } } } + ``` +2. Update it using `commentUpdate`: + - Check off all remaining Plan and Acceptance Criteria items. + - Add a final Notes entry: `- PR merged. 
Issue complete.` + +- When you have successfully merged the PR, output the exact text `[STAGE_COMPLETE]` as the very last line of your final message. +{% endif %} + +## Scope Discipline + +- If your task requires a capability that doesn't exist in the codebase and isn't specified in the spec, stop and comment what's missing on the issue. Don't scaffold unspecced infrastructure. +- Tests must be runnable against $BASE_URL (no localhost assumptions in committed tests). + +## Workpad Rules + +You maintain a single persistent `## Workpad` comment on the Linear issue. This is your structured progress document. + +**Critical rules:** +- **Never create multiple workpad comments.** Always search for an existing comment with `## Workpad` in its body before creating a new one. +- **Update at milestones only** — plan finalized, implementation done, validation complete. Do NOT sync after every minor change. +- **Prefer `sync_workpad` over raw GraphQL.** Write your workpad content to a local `workpad.md` file, then call `sync_workpad` with `issue_id`, `file_path`, and optionally `comment_id` (returned from the first sync). This keeps the workpad body out of your conversation context and saves tokens. Fall back to `linear_graphql` only if `sync_workpad` is unavailable. +- **`linear_graphql` fallback patterns** (use only if `sync_workpad` is unavailable): + - Search comments: `query { issue(id: "<issue-id>") { comments { nodes { id body } } } }` + - Create comment: `mutation { commentCreate(input: { issueId: "<issue-id>", body: "<markdown-body>" }) { comment { id } } }` + - Update comment: `mutation { commentUpdate(id: "<comment-id>", input: { body: "<markdown-body>" }) { comment { id } } }` +- **Never use `__type` or `__schema` introspection queries** against the Linear API. Use the exact patterns above. + +## Media in Workpads (fileUpload) + +When you capture evidence (screenshots, recordings, logs) during implementation, embed them in the workpad using Linear's `fileUpload` API.
This is a 3-step flow: + +**Step 1: Get upload URL** via `linear_graphql`: +```graphql +mutation($filename: String!, $contentType: String!, $size: Int!) { + fileUpload(filename: $filename, contentType: $contentType, size: $size, makePublic: true) { + success + uploadFile { uploadUrl assetUrl headers { key value } } + } +} +``` + +**Step 2: Upload file bytes** using `curl`: +```bash +# Build header flags from the returned headers array +curl -X PUT -H "Content-Type: <contentType>" \ + -H "<header.key>: <header.value>" -H "<header.key>: <header.value>" \ + --data-binary @<local-file> "<uploadUrl>" +``` + +**Step 3: Embed in workpad** — add `![description](assetUrl)` to the workpad comment body (either via `sync_workpad` or `commentUpdate`). + +**Supported content types**: `image/png`, `image/jpeg`, `image/gif`, `video/mp4`, `application/pdf`. + +**When to capture media**: Only when evidence adds value — screenshots of UI changes, recordings of interaction flows, or error screenshots for debugging. Do not upload media for non-visual tasks (e.g., pure API or library changes). + +## Documentation Maintenance + +- If you add a new module, API endpoint, or significant abstraction, update the relevant docs/ file and the AGENTS.md Documentation Map entry. If no relevant doc exists, create one following the docs/ conventions (# Title, > Last updated header). +- If a docs/ file you reference during implementation is stale or missing, update/create it as part of your implementation. Include the update in the same PR as your code changes — never in a separate PR. +- If you make a non-obvious architectural decision during implementation, create a design doc in docs/design-docs/ following the ADR format (numbered, with Status line). Add it to the AGENTS.md design docs table. +- When you complete your implementation, update the > Last updated date on any docs/ file you modified. +- Do not update docs/generated/ files — those are auto-generated and will be overwritten. +- Commit doc updates in the same PR as code changes, not separately.
diff --git a/pipeline-config/workflows/WORKFLOW-jony-agent.md b/pipeline-config/workflows/WORKFLOW-jony-agent.md new file mode 100644 index 00000000..5d793669 --- /dev/null +++ b/pipeline-config/workflows/WORKFLOW-jony-agent.md @@ -0,0 +1,458 @@ +--- +tracker: + kind: linear + api_key: $LINEAR_API_KEY + project_slug: 699c332ae6a9 + active_states: + - Todo + - In Progress + - In Review + - Blocked + - Resume + terminal_states: + - Done + - Cancelled + +escalation_state: Blocked + +polling: + interval_ms: 30000 + +workspace: + root: ./workspaces + +agent: + max_concurrent_agents: 1 + max_turns: 30 + max_retry_backoff_ms: 300000 + +codex: + stall_timeout_ms: 1800000 + +runner: + kind: claude-code + model: claude-sonnet-4-5 + +hooks: + after_create: | + set -euo pipefail + if [ -z "${REPO_URL:-}" ]; then + echo "ERROR: REPO_URL environment variable is not set" >&2 + exit 1 + fi + echo "Cloning $REPO_URL into workspace..." + git clone --depth 1 "$REPO_URL" . + if [ -f package.json ]; then + if [ -f bun.lock ]; then + bun install --frozen-lockfile + elif [ -f pnpm-lock.yaml ]; then + pnpm install --frozen-lockfile + elif [ -f yarn.lock ]; then + yarn install --frozen-lockfile + else + npm install + fi + fi + # --- Build code graph (best-effort) --- + if command -v code-review-graph >/dev/null 2>&1; then + echo "Building code review graph..." + code-review-graph build --repo . || echo "WARNING: code-review-graph build failed, continuing without graph" >&2 + else + echo "WARNING: code-review-graph not installed, skipping graph build" >&2 + fi + echo "Workspace setup complete." + before_run: | + set -euo pipefail + echo "Syncing workspace with upstream..." + + # --- Git lock handling --- + wait_for_git_lock() { + local attempt=0 + while [ -f .git/index.lock ] && [ $attempt -lt 6 ]; do + echo "WARNING: .git/index.lock exists, waiting 5s (attempt $((attempt+1))/6)..." 
>&2 + sleep 5 + attempt=$((attempt+1)) + done + if [ -f .git/index.lock ]; then + echo "WARNING: .git/index.lock still exists after 30s, removing stale lock" >&2 + rm -f .git/index.lock + fi + } + + # --- Git fetch with retry --- + fetch_ok=false + for attempt in 1 2 3; do + wait_for_git_lock + if git fetch origin 2>/dev/null; then + fetch_ok=true + break + fi + echo "WARNING: git fetch failed (attempt $attempt/3), retrying in 2s..." >&2 + sleep 2 + done + if [ "$fetch_ok" = false ]; then + echo "WARNING: git fetch failed after 3 attempts, continuing with stale refs" >&2 + fi + + # --- Rebase (best-effort) --- + CURRENT_BRANCH=$(git rev-parse --abbrev-ref HEAD 2>/dev/null || echo "unknown") + if [ "$CURRENT_BRANCH" = "main" ] || [ "$CURRENT_BRANCH" = "master" ]; then + echo "On $CURRENT_BRANCH — rebasing onto latest..." + wait_for_git_lock + if ! git rebase "origin/$CURRENT_BRANCH" 2>/dev/null; then + echo "WARNING: Rebase failed, aborting rebase" >&2 + git rebase --abort 2>/dev/null || true + fi + else + echo "On feature branch $CURRENT_BRANCH — skipping rebase, fetch only." + fi + # Import investigation brief into CLAUDE.md if it exists + if [ -f "INVESTIGATION-BRIEF.md" ]; then + if ! grep -q "@INVESTIGATION-BRIEF.md" CLAUDE.md 2>/dev/null; then + echo '' >> CLAUDE.md + echo '@INVESTIGATION-BRIEF.md' >> CLAUDE.md + fi + fi + echo "Workspace synced." + before_remove: | + set -uo pipefail + BRANCH=$(git rev-parse --abbrev-ref HEAD 2>/dev/null || echo "") + if [ -z "$BRANCH" ] || [ "$BRANCH" = "main" ] || [ "$BRANCH" = "master" ] || [ "$BRANCH" = "HEAD" ]; then + exit 0 + fi + echo "Cleaning up branch $BRANCH..." + # Close any open PR for this branch (also deletes the remote branch via --delete-branch) + PR_NUM=$(gh pr list --head "$BRANCH" --state open --json number --jq '.[0].number' 2>/dev/null || echo "") + if [ -n "$PR_NUM" ]; then + echo "Closing PR #$PR_NUM and deleting remote branch..." 
+ gh pr close "$PR_NUM" --delete-branch 2>/dev/null || true + else + # No open PR — just delete the remote branch if it exists + echo "No open PR found, deleting remote branch..." + git push origin --delete "$BRANCH" 2>/dev/null || true + fi + echo "Cleanup complete." + timeout_ms: 120000 + +server: + port: 4322 + +observability: + dashboard_enabled: true + refresh_ms: 5000 + +stages: + initial_stage: investigate + + investigate: + type: agent + runner: claude-code + model: claude-sonnet-4-5 + max_turns: 8 + linear_state: In Progress + mcp_servers: + code-review-graph: + command: uvx + args: + - code-review-graph + - serve + on_complete: implement + + implement: + type: agent + runner: claude-code + model: claude-sonnet-4-5 + max_turns: 30 + mcp_servers: + code-review-graph: + command: uvx + args: + - code-review-graph + - serve + on_complete: review + + review: + type: agent + runner: claude-code + model: claude-opus-4-6 + max_turns: 15 + max_rework: 3 + linear_state: In Review + on_complete: merge + on_rework: implement + + merge: + type: agent + runner: claude-code + model: claude-sonnet-4-5 + max_turns: 5 + on_complete: done + + done: + type: terminal + linear_state: Done +--- + +You are running in headless/unattended mode. Do NOT use interactive skills, slash commands, or plan mode. Do not prompt for user input. Complete your work autonomously. + +You are working on the Jony Agent product. + +Implement only what your task specifies. If you encounter missing functionality that another task covers, add a TODO comment rather than implementing it. Do not refactor surrounding code or add unsolicited improvements. + +Never hardcode localhost or 127.0.0.1. Use the $BASE_URL environment variable for all URL references. Set BASE_URL=localhost: during local development. + +# {{ issue.identifier }} — {{ issue.title }} + +You are working on Linear issue {{ issue.identifier }}. 
+ +## Issue Description + +{{ issue.description }} + +{% if issue.labels.size > 0 %} +Labels: {{ issue.labels | join: ", " }} +{% endif %} + +{% if stageName == "investigate" %} +## Stage: Investigation +You are in the INVESTIGATE stage. Your job is to analyze the issue and create an implementation plan. + +{% if issue.state == "Resume" %} +## RESUME CONTEXT +This issue was previously blocked. Check the issue comments for a `## Resume Context` comment explaining what changed. Focus your investigation on the blocking reasons and what has been updated. +{% endif %} + +- Read the codebase to understand existing patterns and architecture +- Identify which files need to change and what the approach should be +- Post a comment on the Linear issue (via `linear_graphql`) with your investigation findings and proposed implementation plan +- Do NOT implement code, create branches, or open PRs in this stage — investigation only + +### Workpad (investigate) +After completing your investigation, create the workpad comment on this Linear issue. +**Preferred**: Write the workpad content to a local `workpad.md` file and call `sync_workpad` with `issue_id` and `file_path`. Save the returned `comment_id` for future updates. +**Fallback** (if `sync_workpad` is unavailable): +1. First, search for an existing workpad comment using `linear_graphql`: + ```graphql + query { issue(id: "{{ issue.id }}") { comments { nodes { id body } } } } + ``` + Look for a comment whose body starts with `## Workpad`. +2. If no workpad comment exists, create one using `commentCreate`. If one exists, update it using `commentUpdate`. +3. Use this template for the workpad body: + ``` + ## Workpad + **Environment**: <branch>:<commit>@<base-url> + + ### Plan + - [ ] Step 1 derived from issue description + - [ ] Step 2 ... + - [ ] Substep if needed + + ### Acceptance Criteria + - [ ] Criterion from issue requirements + - [ ] ... + + ### Validation + - `<verify command>` + - `<verify command>` + + ### Notes + - Investigation complete. Plan posted.
+ + ### Confusions + (Only add this section if something in the issue was genuinely unclear.) + ``` +4. Fill the Plan and Acceptance Criteria sections from your investigation findings. + +## Investigation Brief + +After posting the workpad, write `INVESTIGATION-BRIEF.md` to the worktree root. This file gives the implement-stage agent a concise orientation without re-reading the codebase. + +Keep the brief under ~200 lines (~4K tokens). Use exactly this structure: + +```markdown +# Investigation Brief +## Issue: [ISSUE-KEY] — [Title] + +## Objective +One-paragraph summary of what needs to be done and why. + +## Relevant Files (ranked by importance) +1. `src/path/to/primary-file.ts` — Main file to modify. [What it does, why it matters] +2. `src/path/to/secondary-file.ts` — Related dependency. [What to know] +3. `tests/path/to/test-file.test.ts` — Existing tests. [Coverage notes] + +## Key Code Patterns +- Pattern X is used for Y (see `file.ts:42-67`) +- The codebase uses Z convention for this type of change + +## Architecture Context +- Brief description of relevant subsystem +- Data flow: A → B → C +- Key interfaces/types to be aware of + +## Test Strategy +- Existing test files and what they cover +- Test patterns used (describe/it, vitest, mocking approach) +- Edge cases to cover + +## Gotchas & Constraints +- Don't modify X because Y +- Z is deprecated, use W instead + +## Key Code Excerpts +[2-3 most important code blocks with file path and line numbers] +``` + +## Completion Signals +When you are done: +- If investigation is complete and workpad is posted: output `[STAGE_COMPLETE]` +- If the spec is ambiguous or contradictory: output `[STAGE_FAILED: spec]` with an explanation +- If you hit infrastructure issues (API limits, network errors): output `[STAGE_FAILED: infra]` with details +{% endif %} + +{% if stageName == "implement" %} +## Stage: Implementation +You are in the IMPLEMENT stage. Read INVESTIGATION-BRIEF.md first if it exists in the worktree root. 
It contains targeted findings from the investigation stage including relevant files, code patterns, architecture context, and test strategy. Use it to skip codebase exploration and go straight to implementation. If the file does not exist, fall back to reading issue comments for the investigation plan. + +{% if reworkCount > 0 %} +## REWORK ATTEMPT {{ reworkCount }} +This is a rework attempt. Read ALL comments on this Linear issue starting with `## Review Findings`. These contain the specific findings you must fix. +- Fix ONLY the identified findings +- Do not modify code outside the affected files unless strictly necessary +- Do not reinterpret the spec +- If a finding conflicts with the spec, output `[STAGE_FAILED: spec]` with an explanation +{% endif %} + +## Implementation Steps + +1. Read any investigation notes from previous comments on this issue. +2. Create a feature branch from the issue's suggested branch name{% if issue.branch_name %} (`{{ issue.branch_name }}`){% endif %}, or use `{{ issue.identifier | downcase }}/`. +3. Implement the task per the issue description. +4. Write tests as needed. +5. Run all `# Verify:` commands from the spec. You are not done until every verify command exits 0. +6. Before creating the PR, capture structured tool output: + - Run `npx tsc --noEmit 2>&1` and include output in PR body under `## Tool Output > TypeScript` + - Run `npm test 2>&1` and include summary in PR body under `## Tool Output > Tests` + - Run `semgrep scan --config auto --json 2>&1` (if available) and include raw output in PR body under `## SAST Output` + - Do NOT filter or interpret SAST results — include them verbatim. +7. Commit your changes with message format: `feat({{ issue.identifier }}): `. +8. Open a PR targeting this repo (not its upstream fork parent) via `gh pr create --repo $(git remote get-url origin | sed "s|.*github.com/||;s|\.git$||")` with the issue description in the PR body. Include the Tool Output and SAST Output sections. +9. 
Link the PR to the Linear issue by including `{{ issue.identifier }}` in the PR title or body. + +### Workpad (implement) +Update the workpad comment at these milestones during implementation. +**Preferred**: Edit your local `workpad.md` file and call `sync_workpad` with `issue_id`, `file_path`, and `comment_id` (from the investigate stage). +**Fallback** (if `sync_workpad` is unavailable): +1. Search for the existing workpad comment (body starts with `## Workpad`) using `linear_graphql`: + ```graphql + query { issue(id: "{{ issue.id }}") { comments { nodes { id body } } } } + ``` +2. Update it using `commentUpdate` with the comment's `id`. +3. At each milestone, update the relevant sections: + - **After starting implementation**: Check off Plan items as you complete them. + - **After implementation is done**: Add a Notes entry (e.g., `- Implementation complete. PR # opened.`), update Validation with actual commands run. + - **After all tests pass**: Check off Acceptance Criteria items, add a Notes entry confirming validation. +4. Do NOT update the workpad after every small code change — only at the milestones above. +5. If no workpad comment exists (e.g., investigation stage was skipped), create one using the template from the investigate stage instructions. + +10. **If your changes are app-touching** (UI, API responses visible to users, frontend assets), capture a screenshot after validation passes and embed it in the workpad: + - Take a screenshot (e.g., `npx playwright screenshot` or `curl` the endpoint and save the response). + - Upload it using the fileUpload flow described in the **Media in Workpads** section. + - Add the image to the workpad comment under Notes: `![screenshot after validation](assetUrl)`. + - Skip this step for non-visual changes (library code, configs, internal refactors). 
+ +## Completion Signals +When you are done: +- If all verify commands pass and PR is created: output `[STAGE_COMPLETE]` +- If you cannot resolve a verify failure after 3 attempts: output `[STAGE_FAILED: verify]` with the failing command and output +- If the spec is ambiguous or contradictory: output `[STAGE_FAILED: spec]` with an explanation +- If you hit infrastructure issues (API limits, network errors): output `[STAGE_FAILED: infra]` with details +{% endif %} + +{% if stageName == "review" %} +## Stage: Review +You are a review agent. Load and execute the /pipeline-review skill. + +The PR for this issue is on the current branch. The issue description contains the frozen spec. The PR body contains Tool Output and SAST Output sections from the implementation agent. + +If all findings are clean or only P3/theoretical: output `[STAGE_COMPLETE]` +If surviving P1/P2 findings exist: post them as a `## Review Findings` comment on the Linear issue, then output `[STAGE_FAILED: review]` with a one-line summary. +{% endif %} + +{% if stageName == "merge" %} +## Stage: Merge +You are in the MERGE stage. The PR has been reviewed and approved. +- Merge the PR via `gh pr merge --squash --delete-branch --repo $(git remote get-url origin | sed "s|.*github.com/||;s|\.git$||")` +- Verify the merge succeeded on the main branch +- Do NOT modify code in this stage + +### Workpad (merge) +After merging the PR, update the workpad comment one final time. +**Preferred**: Edit your local `workpad.md` file and call `sync_workpad` with `issue_id`, `file_path`, and `comment_id`. +**Fallback** (if `sync_workpad` is unavailable): +1. Search for the existing workpad comment (body starts with `## Workpad`) using `linear_graphql`: + ```graphql + query { issue(id: "{{ issue.id }}") { comments { nodes { id body } } } } + ``` +2. Update it using `commentUpdate`: + - Check off all remaining Plan and Acceptance Criteria items. + - Add a final Notes entry: `- PR merged. 
Issue complete.` + +- When you have successfully merged the PR, output the exact text `[STAGE_COMPLETE]` as the very last line of your final message. +{% endif %} + +## Scope Discipline + +- If your task requires a capability that doesn't exist in the codebase and isn't specified in the spec, stop and comment what's missing on the issue. Don't scaffold unspecced infrastructure. +- Tests must be runnable against $BASE_URL (no localhost assumptions in committed tests). + +## Workpad Rules + +You maintain a single persistent `## Workpad` comment on the Linear issue. This is your structured progress document. + +**Critical rules:** +- **Never create multiple workpad comments.** Always search for an existing comment with `## Workpad` in its body before creating a new one. +- **Update at milestones only** — plan finalized, implementation done, validation complete. Do NOT sync after every minor change. +- **Prefer `sync_workpad` over raw GraphQL.** Write your workpad content to a local `workpad.md` file, then call `sync_workpad` with `issue_id`, `file_path`, and optionally `comment_id` (returned from the first sync). This keeps the workpad body out of your conversation context and saves tokens. Fall back to `linear_graphql` only if `sync_workpad` is unavailable. +- **`linear_graphql` fallback patterns** (use only if `sync_workpad` is unavailable): + - Search comments: `query { issue(id: "<issue-id>") { comments { nodes { id body } } } }` + - Create comment: `mutation { commentCreate(input: { issueId: "<issue-id>", body: "<workpad-markdown>" }) { comment { id } } }` + - Update comment: `mutation { commentUpdate(id: "<comment-id>", input: { body: "<workpad-markdown>" }) { comment { id } } }` +- **Never use `__type` or `__schema` introspection queries** against the Linear API. Use the exact patterns above. + +## Media in Workpads (fileUpload) + +When you capture evidence (screenshots, recordings, logs) during implementation, embed them in the workpad using Linear's `fileUpload` API. 
This is a 3-step flow: + +**Step 1: Get upload URL** via `linear_graphql`: +```graphql +mutation($filename: String!, $contentType: String!, $size: Int!) { + fileUpload(filename: $filename, contentType: $contentType, size: $size, makePublic: true) { + success + uploadFile { uploadUrl assetUrl headers { key value } } + } +} +``` + +**Step 2: Upload file bytes** using `curl`: +```bash +# Build header flags from the returned headers array +curl -X PUT -H "Content-Type: <contentType>" \ + -H "<header-key>: <header-value>" -H "<header-key>: <header-value>" \ + --data-binary @<file-path> "<uploadUrl>" +``` + +**Step 3: Embed in workpad** — add `![description](assetUrl)` to the workpad comment body (either via `sync_workpad` or `commentUpdate`). + +**Supported content types**: `image/png`, `image/jpeg`, `image/gif`, `video/mp4`, `application/pdf`. + +**When to capture media**: Only when evidence adds value — screenshots of UI changes, recordings of interaction flows, or error screenshots for debugging. Do not upload media for non-visual tasks (e.g., pure API or library changes). + +## Documentation Maintenance + +- If you add a new module, API endpoint, or significant abstraction, update the relevant docs/ file and the AGENTS.md Documentation Map entry. If no relevant doc exists, create one following the docs/ conventions (# Title, > Last updated header). +- If a docs/ file you reference during implementation is stale or missing, update/create it as part of your implementation. Include the update in the same PR as your code changes — never in a separate PR. +- If you make a non-obvious architectural decision during implementation, create a design doc in docs/design-docs/ following the ADR format (numbered, with Status line). Add it to the AGENTS.md design docs table. +- When you complete your implementation, update the > Last updated date on any docs/ file you modified. +- Do not update docs/generated/ files — those are auto-generated and will be overwritten. +- Commit doc updates in the same PR as code changes, not separately. 
diff --git a/pipeline-config/workflows/WORKFLOW-stickerlabs.md b/pipeline-config/workflows/WORKFLOW-stickerlabs.md new file mode 100644 index 00000000..f6e64a5b --- /dev/null +++ b/pipeline-config/workflows/WORKFLOW-stickerlabs.md @@ -0,0 +1,458 @@ +--- +tracker: + kind: linear + api_key: $LINEAR_API_KEY + project_slug: 746e66ff0e40 + active_states: + - Todo + - In Progress + - In Review + - Blocked + - Resume + terminal_states: + - Done + - Cancelled + +escalation_state: Blocked + +polling: + interval_ms: 30000 + +workspace: + root: ./workspaces + +agent: + max_concurrent_agents: 1 + max_turns: 30 + max_retry_backoff_ms: 300000 + +codex: + stall_timeout_ms: 1800000 + +runner: + kind: claude-code + model: claude-sonnet-4-5 + +hooks: + after_create: | + set -euo pipefail + if [ -z "${REPO_URL:-}" ]; then + echo "ERROR: REPO_URL environment variable is not set" >&2 + exit 1 + fi + echo "Cloning $REPO_URL into workspace..." + git clone --depth 1 "$REPO_URL" . + if [ -f package.json ]; then + if [ -f bun.lock ]; then + bun install --frozen-lockfile + elif [ -f pnpm-lock.yaml ]; then + pnpm install --frozen-lockfile + elif [ -f yarn.lock ]; then + yarn install --frozen-lockfile + else + npm install + fi + fi + # --- Build code graph (best-effort) --- + if command -v code-review-graph >/dev/null 2>&1; then + echo "Building code review graph..." + code-review-graph build --repo . || echo "WARNING: code-review-graph build failed, continuing without graph" >&2 + else + echo "WARNING: code-review-graph not installed, skipping graph build" >&2 + fi + echo "Workspace setup complete." + before_run: | + set -euo pipefail + echo "Syncing workspace with upstream..." + + # --- Git lock handling --- + wait_for_git_lock() { + local attempt=0 + while [ -f .git/index.lock ] && [ $attempt -lt 6 ]; do + echo "WARNING: .git/index.lock exists, waiting 5s (attempt $((attempt+1))/6)..." 
>&2 + sleep 5 + attempt=$((attempt+1)) + done + if [ -f .git/index.lock ]; then + echo "WARNING: .git/index.lock still exists after 30s, removing stale lock" >&2 + rm -f .git/index.lock + fi + } + + # --- Git fetch with retry --- + fetch_ok=false + for attempt in 1 2 3; do + wait_for_git_lock + if git fetch origin 2>/dev/null; then + fetch_ok=true + break + fi + echo "WARNING: git fetch failed (attempt $attempt/3), retrying in 2s..." >&2 + sleep 2 + done + if [ "$fetch_ok" = false ]; then + echo "WARNING: git fetch failed after 3 attempts, continuing with stale refs" >&2 + fi + + # --- Rebase (best-effort) --- + CURRENT_BRANCH=$(git rev-parse --abbrev-ref HEAD 2>/dev/null || echo "unknown") + if [ "$CURRENT_BRANCH" = "main" ] || [ "$CURRENT_BRANCH" = "master" ]; then + echo "On $CURRENT_BRANCH — rebasing onto latest..." + wait_for_git_lock + if ! git rebase "origin/$CURRENT_BRANCH" 2>/dev/null; then + echo "WARNING: Rebase failed, aborting rebase" >&2 + git rebase --abort 2>/dev/null || true + fi + else + echo "On feature branch $CURRENT_BRANCH — skipping rebase, fetch only." + fi + # Import investigation brief into CLAUDE.md if it exists + if [ -f "INVESTIGATION-BRIEF.md" ]; then + if ! grep -q "@INVESTIGATION-BRIEF.md" CLAUDE.md 2>/dev/null; then + echo '' >> CLAUDE.md + echo '@INVESTIGATION-BRIEF.md' >> CLAUDE.md + fi + fi + echo "Workspace synced." + before_remove: | + set -uo pipefail + BRANCH=$(git rev-parse --abbrev-ref HEAD 2>/dev/null || echo "") + if [ -z "$BRANCH" ] || [ "$BRANCH" = "main" ] || [ "$BRANCH" = "master" ] || [ "$BRANCH" = "HEAD" ]; then + exit 0 + fi + echo "Cleaning up branch $BRANCH..." + # Close any open PR for this branch (also deletes the remote branch via --delete-branch) + PR_NUM=$(gh pr list --head "$BRANCH" --state open --json number --jq '.[0].number' 2>/dev/null || echo "") + if [ -n "$PR_NUM" ]; then + echo "Closing PR #$PR_NUM and deleting remote branch..." 
+ gh pr close "$PR_NUM" --delete-branch 2>/dev/null || true + else + # No open PR — just delete the remote branch if it exists + echo "No open PR found, deleting remote branch..." + git push origin --delete "$BRANCH" 2>/dev/null || true + fi + echo "Cleanup complete." + timeout_ms: 120000 + +server: + port: 4326 + +observability: + dashboard_enabled: true + refresh_ms: 5000 + +stages: + initial_stage: investigate + + investigate: + type: agent + runner: claude-code + model: claude-sonnet-4-5 + max_turns: 8 + linear_state: In Progress + mcp_servers: + code-review-graph: + command: uvx + args: + - code-review-graph + - serve + on_complete: implement + + implement: + type: agent + runner: claude-code + model: claude-sonnet-4-5 + max_turns: 30 + mcp_servers: + code-review-graph: + command: uvx + args: + - code-review-graph + - serve + on_complete: review + + review: + type: agent + runner: claude-code + model: claude-opus-4-6 + max_turns: 15 + max_rework: 3 + linear_state: In Review + on_complete: merge + on_rework: implement + + merge: + type: agent + runner: claude-code + model: claude-sonnet-4-5 + max_turns: 5 + on_complete: done + + done: + type: terminal + linear_state: Done +--- + +You are running in headless/unattended mode. Do NOT use interactive skills, slash commands, or plan mode. Do not prompt for user input. Complete your work autonomously. + +You are working on the Sticker Labs product. + +Implement only what your task specifies. If you encounter missing functionality that another task covers, add a TODO comment rather than implementing it. Do not refactor surrounding code or add unsolicited improvements. + +Never hardcode localhost or 127.0.0.1. Use the $BASE_URL environment variable for all URL references. Set BASE_URL=localhost: during local development. + +# {{ issue.identifier }} — {{ issue.title }} + +You are working on Linear issue {{ issue.identifier }}. 
+ +## Issue Description + +{{ issue.description }} + +{% if issue.labels.size > 0 %} +Labels: {{ issue.labels | join: ", " }} +{% endif %} + +{% if stageName == "investigate" %} +## Stage: Investigation +You are in the INVESTIGATE stage. Your job is to analyze the issue and create an implementation plan. + +{% if issue.state == "Resume" %} +## RESUME CONTEXT +This issue was previously blocked. Check the issue comments for a `## Resume Context` comment explaining what changed. Focus your investigation on the blocking reasons and what has been updated. +{% endif %} + +- Read the codebase to understand existing patterns and architecture +- Identify which files need to change and what the approach should be +- Post a comment on the Linear issue (via `gh`) with your investigation findings and proposed implementation plan +- Do NOT implement code, create branches, or open PRs in this stage — investigation only + +### Workpad (investigate) +After completing your investigation, create the workpad comment on this Linear issue. +**Preferred**: Write the workpad content to a local `workpad.md` file and call `sync_workpad` with `issue_id` and `file_path`. Save the returned `comment_id` for future updates. +**Fallback** (if `sync_workpad` is unavailable): +1. First, search for an existing workpad comment using `linear_graphql`: + ```graphql + query { issue(id: "{{ issue.id }}") { comments { nodes { id body } } } } + ``` + Look for a comment whose body starts with `## Workpad`. +2. If no workpad comment exists, create one using `commentCreate`. If one exists, update it using `commentUpdate`. +3. Use this template for the workpad body: + ``` + ## Workpad + **Environment**: :@ + + ### Plan + - [ ] Step 1 derived from issue description + - [ ] Step 2 ... + - [ ] Substep if needed + + ### Acceptance Criteria + - [ ] Criterion from issue requirements + - [ ] ... + + ### Validation + - `` + - `` + + ### Notes + - Investigation complete. Plan posted. 
+ + ### Confusions + (Only add this section if something in the issue was genuinely unclear.) + ``` +4. Fill the Plan and Acceptance Criteria sections from your investigation findings. + +## Investigation Brief + +After posting the workpad, write `INVESTIGATION-BRIEF.md` to the worktree root. This file gives the implement-stage agent a concise orientation without re-reading the codebase. + +Keep the brief under ~200 lines (~4K tokens). Use exactly this structure: + +```markdown +# Investigation Brief +## Issue: [ISSUE-KEY] — [Title] + +## Objective +One-paragraph summary of what needs to be done and why. + +## Relevant Files (ranked by importance) +1. `src/path/to/primary-file.ts` — Main file to modify. [What it does, why it matters] +2. `src/path/to/secondary-file.ts` — Related dependency. [What to know] +3. `tests/path/to/test-file.test.ts` — Existing tests. [Coverage notes] + +## Key Code Patterns +- Pattern X is used for Y (see `file.ts:42-67`) +- The codebase uses Z convention for this type of change + +## Architecture Context +- Brief description of relevant subsystem +- Data flow: A → B → C +- Key interfaces/types to be aware of + +## Test Strategy +- Existing test files and what they cover +- Test patterns used (describe/it, vitest, mocking approach) +- Edge cases to cover + +## Gotchas & Constraints +- Don't modify X because Y +- Z is deprecated, use W instead + +## Key Code Excerpts +[2-3 most important code blocks with file path and line numbers] +``` + +## Completion Signals +When you are done: +- If investigation is complete and workpad is posted: output `[STAGE_COMPLETE]` +- If the spec is ambiguous or contradictory: output `[STAGE_FAILED: spec]` with an explanation +- If you hit infrastructure issues (API limits, network errors): output `[STAGE_FAILED: infra]` with details +{% endif %} + +{% if stageName == "implement" %} +## Stage: Implementation +You are in the IMPLEMENT stage. Read INVESTIGATION-BRIEF.md first if it exists in the worktree root. 
It contains targeted findings from the investigation stage including relevant files, code patterns, architecture context, and test strategy. Use it to skip codebase exploration and go straight to implementation. If the file does not exist, fall back to reading issue comments for the investigation plan. + +{% if reworkCount > 0 %} +## REWORK ATTEMPT {{ reworkCount }} +This is a rework attempt. Read ALL comments on this Linear issue starting with `## Review Findings`. These contain the specific findings you must fix. +- Fix ONLY the identified findings +- Do not modify code outside the affected files unless strictly necessary +- Do not reinterpret the spec +- If a finding conflicts with the spec, output `[STAGE_FAILED: spec]` with an explanation +{% endif %} + +## Implementation Steps + +1. Read any investigation notes from previous comments on this issue. +2. Create a feature branch from the issue's suggested branch name{% if issue.branch_name %} (`{{ issue.branch_name }}`){% endif %}, or use `{{ issue.identifier | downcase }}/`. +3. Implement the task per the issue description. +4. Write tests as needed. +5. Run all `# Verify:` commands from the spec. You are not done until every verify command exits 0. +6. Before creating the PR, capture structured tool output: + - Run `npx tsc --noEmit 2>&1` and include output in PR body under `## Tool Output > TypeScript` + - Run `npm test 2>&1` and include summary in PR body under `## Tool Output > Tests` + - Run `semgrep scan --config auto --json 2>&1` (if available) and include raw output in PR body under `## SAST Output` + - Do NOT filter or interpret SAST results — include them verbatim. +7. Commit your changes with message format: `feat({{ issue.identifier }}): `. +8. Open a PR targeting this repo (not its upstream fork parent) via `gh pr create --repo $(git remote get-url origin | sed "s|.*github.com/||;s|\.git$||")` with the issue description in the PR body. Include the Tool Output and SAST Output sections. +9. 
Link the PR to the Linear issue by including `{{ issue.identifier }}` in the PR title or body. + +### Workpad (implement) +Update the workpad comment at these milestones during implementation. +**Preferred**: Edit your local `workpad.md` file and call `sync_workpad` with `issue_id`, `file_path`, and `comment_id` (from the investigate stage). +**Fallback** (if `sync_workpad` is unavailable): +1. Search for the existing workpad comment (body starts with `## Workpad`) using `linear_graphql`: + ```graphql + query { issue(id: "{{ issue.id }}") { comments { nodes { id body } } } } + ``` +2. Update it using `commentUpdate` with the comment's `id`. +3. At each milestone, update the relevant sections: + - **After starting implementation**: Check off Plan items as you complete them. + - **After implementation is done**: Add a Notes entry (e.g., `- Implementation complete. PR # opened.`), update Validation with actual commands run. + - **After all tests pass**: Check off Acceptance Criteria items, add a Notes entry confirming validation. +4. Do NOT update the workpad after every small code change — only at the milestones above. +5. If no workpad comment exists (e.g., investigation stage was skipped), create one using the template from the investigate stage instructions. + +10. **If your changes are app-touching** (UI, API responses visible to users, frontend assets), capture a screenshot after validation passes and embed it in the workpad: + - Take a screenshot (e.g., `npx playwright screenshot` or `curl` the endpoint and save the response). + - Upload it using the fileUpload flow described in the **Media in Workpads** section. + - Add the image to the workpad comment under Notes: `![screenshot after validation](assetUrl)`. + - Skip this step for non-visual changes (library code, configs, internal refactors). 
+ +## Completion Signals +When you are done: +- If all verify commands pass and PR is created: output `[STAGE_COMPLETE]` +- If you cannot resolve a verify failure after 3 attempts: output `[STAGE_FAILED: verify]` with the failing command and output +- If the spec is ambiguous or contradictory: output `[STAGE_FAILED: spec]` with an explanation +- If you hit infrastructure issues (API limits, network errors): output `[STAGE_FAILED: infra]` with details +{% endif %} + +{% if stageName == "review" %} +## Stage: Review +You are a review agent. Load and execute the /pipeline-review skill. + +The PR for this issue is on the current branch. The issue description contains the frozen spec. The PR body contains Tool Output and SAST Output sections from the implementation agent. + +If all findings are clean or only P3/theoretical: output `[STAGE_COMPLETE]` +If surviving P1/P2 findings exist: post them as a `## Review Findings` comment on the Linear issue, then output `[STAGE_FAILED: review]` with a one-line summary. +{% endif %} + +{% if stageName == "merge" %} +## Stage: Merge +You are in the MERGE stage. The PR has been reviewed and approved. +- Merge the PR via `gh pr merge --squash --delete-branch --repo $(git remote get-url origin | sed "s|.*github.com/||;s|\.git$||")` +- Verify the merge succeeded on the main branch +- Do NOT modify code in this stage + +### Workpad (merge) +After merging the PR, update the workpad comment one final time. +**Preferred**: Edit your local `workpad.md` file and call `sync_workpad` with `issue_id`, `file_path`, and `comment_id`. +**Fallback** (if `sync_workpad` is unavailable): +1. Search for the existing workpad comment (body starts with `## Workpad`) using `linear_graphql`: + ```graphql + query { issue(id: "{{ issue.id }}") { comments { nodes { id body } } } } + ``` +2. Update it using `commentUpdate`: + - Check off all remaining Plan and Acceptance Criteria items. + - Add a final Notes entry: `- PR merged. 
Issue complete.` + +- When you have successfully merged the PR, output the exact text `[STAGE_COMPLETE]` as the very last line of your final message. +{% endif %} + +## Scope Discipline + +- If your task requires a capability that doesn't exist in the codebase and isn't specified in the spec, stop and comment what's missing on the issue. Don't scaffold unspecced infrastructure. +- Tests must be runnable against $BASE_URL (no localhost assumptions in committed tests). + +## Workpad Rules + +You maintain a single persistent `## Workpad` comment on the Linear issue. This is your structured progress document. + +**Critical rules:** +- **Never create multiple workpad comments.** Always search for an existing comment with `## Workpad` in its body before creating a new one. +- **Update at milestones only** — plan finalized, implementation done, validation complete. Do NOT sync after every minor change. +- **Prefer `sync_workpad` over raw GraphQL.** Write your workpad content to a local `workpad.md` file, then call `sync_workpad` with `issue_id`, `file_path`, and optionally `comment_id` (returned from the first sync). This keeps the workpad body out of your conversation context and saves tokens. Fall back to `linear_graphql` only if `sync_workpad` is unavailable. +- **`linear_graphql` fallback patterns** (use only if `sync_workpad` is unavailable): + - Search comments: `query { issue(id: "<issue-id>") { comments { nodes { id body } } } }` + - Create comment: `mutation { commentCreate(input: { issueId: "<issue-id>", body: "<workpad-markdown>" }) { comment { id } } }` + - Update comment: `mutation { commentUpdate(id: "<comment-id>", input: { body: "<workpad-markdown>" }) { comment { id } } }` +- **Never use `__type` or `__schema` introspection queries** against the Linear API. Use the exact patterns above. + +## Media in Workpads (fileUpload) + +When you capture evidence (screenshots, recordings, logs) during implementation, embed them in the workpad using Linear's `fileUpload` API. 
This is a 3-step flow: + +**Step 1: Get upload URL** via `linear_graphql`: +```graphql +mutation($filename: String!, $contentType: String!, $size: Int!) { + fileUpload(filename: $filename, contentType: $contentType, size: $size, makePublic: true) { + success + uploadFile { uploadUrl assetUrl headers { key value } } + } +} +``` + +**Step 2: Upload file bytes** using `curl`: +```bash +# Build header flags from the returned headers array +curl -X PUT -H "Content-Type: <contentType>" \ + -H "<header-key>: <header-value>" -H "<header-key>: <header-value>" \ + --data-binary @<file-path> "<uploadUrl>" +``` + +**Step 3: Embed in workpad** — add `![description](assetUrl)` to the workpad comment body (either via `sync_workpad` or `commentUpdate`). + +**Supported content types**: `image/png`, `image/jpeg`, `image/gif`, `video/mp4`, `application/pdf`. + +**When to capture media**: Only when evidence adds value — screenshots of UI changes, recordings of interaction flows, or error screenshots for debugging. Do not upload media for non-visual tasks (e.g., pure API or library changes). + +## Documentation Maintenance + +- If you add a new module, API endpoint, or significant abstraction, update the relevant docs/ file and the AGENTS.md Documentation Map entry. If no relevant doc exists, create one following the docs/ conventions (# Title, > Last updated header). +- If a docs/ file you reference during implementation is stale or missing, update/create it as part of your implementation. Include the update in the same PR as your code changes — never in a separate PR. +- If you make a non-obvious architectural decision during implementation, create a design doc in docs/design-docs/ following the ADR format (numbered, with Status line). Add it to the AGENTS.md design docs table. +- When you complete your implementation, update the > Last updated date on any docs/ file you modified. +- Do not update docs/generated/ files — those are auto-generated and will be overwritten. +- Commit doc updates in the same PR as code changes, not separately. 
diff --git a/pipeline-config/workflows/WORKFLOW-symphony.md b/pipeline-config/workflows/WORKFLOW-symphony.md new file mode 100644 index 00000000..97ba38e2 --- /dev/null +++ b/pipeline-config/workflows/WORKFLOW-symphony.md @@ -0,0 +1,597 @@ +--- +tracker: + kind: linear + api_key: $LINEAR_API_KEY + project_slug: fdba14472043 + active_states: + - Todo + - In Progress + - In Review + - Blocked + - Resume + terminal_states: + - Done + - Cancelled + +escalation_state: Blocked + +polling: + interval_ms: 30000 + +workspace: + root: ./workspaces + +agent: + max_concurrent_agents: 5 + max_turns: 30 + max_retry_backoff_ms: 300000 + +codex: + stall_timeout_ms: 1800000 + +runner: + kind: claude-code + model: claude-sonnet-4-5 + +hooks: + after_create: | + set -euo pipefail + if [ -z "${REPO_URL:-}" ]; then + echo "ERROR: REPO_URL environment variable is not set" >&2 + exit 1 + fi + + # --- Derive bare clone path (absolute, shared across workers) --- + REPO_SLUG=$(basename "${REPO_URL%.git}") + BARE_CLONE_DIR="$(cd .. && pwd)/.bare-clones" + BARE_CLONE="$BARE_CLONE_DIR/$REPO_SLUG" + WORKSPACE_DIR="$PWD" + ISSUE_KEY=$(basename "$WORKSPACE_DIR") + BRANCH_NAME="worktree/$ISSUE_KEY" + + # --- Create bare clone if it doesn't exist (race-safe) --- + mkdir -p "$BARE_CLONE_DIR" + if [ ! -d "$BARE_CLONE" ]; then + echo "Creating shared bare clone for $REPO_SLUG..." + if ! git clone --bare "$REPO_URL" "$BARE_CLONE" 2>/dev/null; then + # Another worker may have created it concurrently — verify it exists + if [ ! -d "$BARE_CLONE" ]; then + echo "ERROR: Failed to create bare clone at $BARE_CLONE" >&2 + exit 1 + fi + echo "Bare clone already created by another worker." 
+ fi + else + echo "Using existing bare clone at $BARE_CLONE" + fi + + # --- Fetch latest refs into bare clone --- + git -C "$BARE_CLONE" fetch origin 2>/dev/null || echo "WARNING: fetch failed, using cached refs" >&2 + + # --- Create worktree for this issue --- + echo "Creating worktree for $ISSUE_KEY on branch $BRANCH_NAME..." + git -C "$BARE_CLONE" worktree add "$WORKSPACE_DIR" -b "$BRANCH_NAME" main + + # --- Install dependencies --- + if [ -f package.json ]; then + if [ -f bun.lock ]; then + bun install --frozen-lockfile + elif [ -f pnpm-lock.yaml ]; then + pnpm install --frozen-lockfile + elif [ -f yarn.lock ]; then + yarn install --frozen-lockfile + else + npm install + fi + fi + # --- Build code graph (best-effort) --- + if command -v code-review-graph >/dev/null 2>&1; then + echo "Building code review graph..." + code-review-graph build --repo . || echo "WARNING: code-review-graph build failed, continuing without graph" >&2 + else + echo "WARNING: code-review-graph not installed, skipping graph build" >&2 + fi + echo "Workspace setup complete (worktree: $BRANCH_NAME)." + before_run: | + set -euo pipefail + echo "Syncing workspace with upstream..." + + # --- Resolve git dir (worktree .git is a file, not a directory) --- + resolve_git_dir() { + if [ -f .git ]; then + # Worktree: .git is a file containing "gitdir: /path/to/.bare-clones/repo/worktrees/..." + sed 's/^gitdir: //' .git + elif [ -d .git ]; then + echo ".git" + else + echo "" + fi + } + GIT_DIR=$(resolve_git_dir) + + # --- Git lock handling (works for both worktrees and regular clones) --- + wait_for_git_lock() { + if [ -z "$GIT_DIR" ]; then return; fi + local lock_file="$GIT_DIR/index.lock" + local attempt=0 + while [ -f "$lock_file" ] && [ $attempt -lt 6 ]; do + echo "WARNING: $lock_file exists, waiting 5s (attempt $((attempt+1))/6)..." 
>&2 + sleep 5 + attempt=$((attempt+1)) + done + if [ -f "$lock_file" ]; then + echo "WARNING: $lock_file still exists after 30s, removing stale lock" >&2 + rm -f "$lock_file" + fi + } + + # --- Git fetch with retry --- + fetch_ok=false + for attempt in 1 2 3; do + wait_for_git_lock + if git fetch origin 2>/dev/null; then + fetch_ok=true + break + fi + echo "WARNING: git fetch failed (attempt $attempt/3), retrying in 2s..." >&2 + sleep 2 + done + if [ "$fetch_ok" = false ]; then + echo "WARNING: git fetch failed after 3 attempts, continuing with stale refs" >&2 + fi + + # --- Rebase (best-effort) --- + CURRENT_BRANCH=$(git rev-parse --abbrev-ref HEAD 2>/dev/null || echo "unknown") + if [ "$CURRENT_BRANCH" = "main" ] || [ "$CURRENT_BRANCH" = "master" ]; then + echo "On $CURRENT_BRANCH — rebasing onto latest..." + wait_for_git_lock + # In bare clone worktrees, refs are stored as refs/heads/, not refs/remotes/origin/ + # Try origin/ first (regular clone), fall back to (bare clone worktree) + if git show-ref --verify --quiet "refs/remotes/origin/$CURRENT_BRANCH"; then + REBASE_TARGET="origin/$CURRENT_BRANCH" + else + REBASE_TARGET="$CURRENT_BRANCH" + fi + if ! git rebase "$REBASE_TARGET" 2>/dev/null; then + echo "WARNING: Rebase failed, aborting rebase" >&2 + git rebase --abort 2>/dev/null || true + fi + else + echo "On feature branch $CURRENT_BRANCH — skipping rebase, fetch only." + fi + # Import investigation brief into CLAUDE.md if it exists + if [ -f "INVESTIGATION-BRIEF.md" ]; then + if ! grep -q "@INVESTIGATION-BRIEF.md" CLAUDE.md 2>/dev/null; then + echo '' >> CLAUDE.md + echo '@INVESTIGATION-BRIEF.md' >> CLAUDE.md + fi + fi + # Import rebase brief into CLAUDE.md if it exists + if [ -f "REBASE-BRIEF.md" ]; then + if ! grep -q "@REBASE-BRIEF.md" CLAUDE.md 2>/dev/null; then + echo '' >> CLAUDE.md + echo '@REBASE-BRIEF.md' >> CLAUDE.md + fi + fi + echo "Workspace synced." 
+ before_remove: | + set -uo pipefail + + # --- Handle case where worktree was never fully set up --- + if [ ! -e .git ]; then + echo "No git repo in workspace, nothing to clean up." + exit 0 + fi + + BRANCH=$(git rev-parse --abbrev-ref HEAD 2>/dev/null || echo "") + if [ -z "$BRANCH" ] || [ "$BRANCH" = "main" ] || [ "$BRANCH" = "master" ] || [ "$BRANCH" = "HEAD" ]; then + exit 0 + fi + + echo "Cleaning up branch $BRANCH..." + + # --- Close any open PR for this branch --- + PR_NUM=$(gh pr list --head "$BRANCH" --state open --json number --jq '.[0].number' 2>/dev/null || echo "") + if [ -n "$PR_NUM" ]; then + echo "Closing PR #$PR_NUM and deleting remote branch..." + gh pr close "$PR_NUM" --delete-branch 2>/dev/null || true + else + echo "No open PR found, deleting remote branch..." + git push origin --delete "$BRANCH" 2>/dev/null || true + fi + + # --- Remove worktree entry from bare clone --- + REPO_SLUG=$(basename "${REPO_URL%.git}") + BARE_CLONE="$(cd .. && pwd)/.bare-clones/$REPO_SLUG" + if [ -d "$BARE_CLONE" ]; then + echo "Removing worktree entry from bare clone..." + git -C "$BARE_CLONE" worktree remove "$PWD" --force 2>/dev/null || true + git -C "$BARE_CLONE" branch -D "$BRANCH" 2>/dev/null || true + fi + echo "Cleanup complete." + timeout_ms: 120000 + +server: + port: 4321 + +observability: + dashboard_enabled: true + refresh_ms: 5000 + +stages: + initial_stage: investigate + + # Fast-track: issues labeled "trivial" skip the investigate stage and start at implement. 
+ fast_track: + label: trivial + initial_stage: implement + + investigate: + type: agent + runner: claude-code + model: claude-opus-4-6 + max_turns: 8 + linear_state: In Progress + mcp_servers: + code-review-graph: + command: uvx + args: + - code-review-graph + - serve + on_complete: implement + + implement: + type: agent + runner: claude-code + model: claude-opus-4-6 + max_turns: 30 + mcp_servers: + code-review-graph: + command: uvx + args: + - code-review-graph + - serve + on_complete: review + + review: + type: agent + runner: claude-code + model: claude-opus-4-6 + max_turns: 15 + max_rework: 3 + linear_state: In Review + on_complete: merge + on_rework: implement + + merge: + type: agent + runner: claude-code + model: claude-sonnet-4-5 + max_turns: 5 + on_complete: done + on_rework: implement + max_rework: 2 + + done: + type: terminal + linear_state: Done +--- + +You are running in headless/unattended mode. Do NOT use interactive skills, slash commands, or plan mode. Do not prompt for user input. Complete your work autonomously. + +You are working on the Symphony orchestrator (symphony-ts). This is the pipeline orchestration layer that schedules and coordinates autonomous development agents. + +Implement only what your task specifies. If you encounter missing functionality that another task covers, add a TODO comment rather than implementing it. Do not refactor surrounding code or add unsolicited improvements. + +Never hardcode localhost or 127.0.0.1. Use the $BASE_URL environment variable for all URL references. Set BASE_URL=localhost: during local development. + +# {{ issue.identifier }} — {{ issue.title }} + +You are working on Linear issue {{ issue.identifier }}. + +## Issue Description + +{{ issue.description }} + +{% if issue.labels.size > 0 %} +Labels: {{ issue.labels | join: ", " }} +{% endif %} + +{% if stageName == "investigate" %} +## Stage: Investigation +You are in the INVESTIGATE stage. 
Your job is to analyze the issue and create an implementation plan. + +{% if issue.state == "Resume" %} +## RESUME CONTEXT +This issue was previously blocked. Check the issue comments for a `## Resume Context` comment explaining what changed. Focus your investigation on the blocking reasons and what has been updated. +{% endif %} + +- Read the codebase to understand existing patterns and architecture +- Identify which files need to change and what the approach should be +- Post a comment on the Linear issue (via `gh`) with your investigation findings and proposed implementation plan +- Do NOT implement code, create branches, or open PRs in this stage — investigation only + +### Workpad (investigate) +After completing your investigation, create the workpad comment on this Linear issue. +**Preferred**: Write the workpad content to a local `workpad.md` file and call `sync_workpad` with `issue_id` and `file_path`. Save the returned `comment_id` for future updates. +**Fallback** (if `sync_workpad` is unavailable): +1. First, search for an existing workpad comment using `linear_graphql`: + ```graphql + query { issue(id: "{{ issue.id }}") { comments { nodes { id body } } } } + ``` + Look for a comment whose body starts with `## Workpad`. +2. If no workpad comment exists, create one using `commentCreate`. If one exists, update it using `commentUpdate`. +3. Use this template for the workpad body: + ``` + ## Workpad + **Environment**: :@ + + ### Plan + - [ ] Step 1 derived from issue description + - [ ] Step 2 ... + - [ ] Substep if needed + + ### Acceptance Criteria + - [ ] Criterion from issue requirements + - [ ] ... + + ### Validation + - `` + - `` + + ### Notes + - Investigation complete. Plan posted. + + ### Confusions + (Only add this section if something in the issue was genuinely unclear.) + ``` +4. Fill the Plan and Acceptance Criteria sections from your investigation findings. 
+ +## Investigation Brief + +After posting the workpad, write `INVESTIGATION-BRIEF.md` to the worktree root. This file gives the implement-stage agent a concise orientation without re-reading the codebase. + +Keep the brief under ~200 lines (~4K tokens). Use exactly this structure: + +```markdown +# Investigation Brief +## Issue: [ISSUE-KEY] — [Title] + +## Objective +One-paragraph summary of what needs to be done and why. + +## Relevant Files (ranked by importance) +1. `src/path/to/primary-file.ts` — Main file to modify. [What it does, why it matters] +2. `src/path/to/secondary-file.ts` — Related dependency. [What to know] +3. `tests/path/to/test-file.test.ts` — Existing tests. [Coverage notes] + +## Key Code Patterns +- Pattern X is used for Y (see `file.ts:42-67`) +- The codebase uses Z convention for this type of change + +## Architecture Context +- Brief description of relevant subsystem +- Data flow: A → B → C +- Key interfaces/types to be aware of + +## Test Strategy +- Existing test files and what they cover +- Test patterns used (describe/it, vitest, mocking approach) +- Edge cases to cover + +## Gotchas & Constraints +- Don't modify X because Y +- Z is deprecated, use W instead + +## Key Code Excerpts +[2-3 most important code blocks with file path and line numbers] +``` + +## Completion Signals +When you are done: +- If investigation is complete and workpad is posted: output `[STAGE_COMPLETE]` +- If the spec is ambiguous or contradictory: output `[STAGE_FAILED: spec]` with an explanation +- If you hit infrastructure issues (API limits, network errors): output `[STAGE_FAILED: infra]` with details +{% endif %} + +{% if stageName == "implement" %} +## Stage: Implementation +You are in the IMPLEMENT stage. Read INVESTIGATION-BRIEF.md first if it exists in the worktree root. It contains targeted findings from the investigation stage including relevant files, code patterns, architecture context, and test strategy. 
Use it to skip codebase exploration and go straight to implementation. If the file does not exist, fall back to reading issue comments for the investigation plan. + +{% if reworkCount > 0 %} +## REWORK ATTEMPT {{ reworkCount }} + +**First, determine the rework type:** + +### If `REBASE-BRIEF.md` exists in the worktree root — this is a REBASE REWORK: +1. Read `REBASE-BRIEF.md` for context on conflicting files and recent main commits +2. Rebase the current branch onto `origin/main` and resolve all merge conflicts +3. Run all `# Verify:` commands from the spec to ensure the build still passes +4. Delete `REBASE-BRIEF.md` after successful rebase and verification +5. Do NOT modify code beyond what is necessary to resolve conflicts +6. If conflicts cannot be resolved cleanly, output `[STAGE_FAILED: verify]` with details + +### Else if `## Review Findings` comments exist — this is a REVIEW REWORK: +Read ALL comments on this Linear issue starting with `## Review Findings`. These contain the specific findings you must fix. +- Fix ONLY the identified findings +- Do not modify code outside the affected files unless strictly necessary +- Do not reinterpret the spec +- If a finding conflicts with the spec, output `[STAGE_FAILED: spec]` with an explanation +{% endif %} + +## Implementation Steps + +1. Read any investigation notes from previous comments on this issue. +2. Create a feature branch from the issue's suggested branch name{% if issue.branch_name %} (`{{ issue.branch_name }}`){% endif %}, or use `{{ issue.identifier | downcase }}/`. +3. Implement the task per the issue description. +4. Write tests as needed. +5. Run all `# Verify:` commands from the spec. You are not done until every verify command exits 0. +6. 
Before creating the PR, capture structured tool output: + - Run `npx tsc --noEmit 2>&1` and include output in PR body under `## Tool Output > TypeScript` + - Run `npm test 2>&1` and include summary in PR body under `## Tool Output > Tests` + - Run `semgrep scan --config auto --json 2>&1` (if available) and include raw output in PR body under `## SAST Output` + - Do NOT filter or interpret SAST results — include them verbatim. +7. Commit your changes with message format: `feat({{ issue.identifier }}): `. +8. Open a PR targeting this repo (not its upstream fork parent) via `gh pr create --repo $(git remote get-url origin | sed "s|.*github.com/||;s|\.git$||")` with the issue description in the PR body. Include the Tool Output and SAST Output sections. +9. Link the PR to the Linear issue by including `{{ issue.identifier }}` in the PR title or body. + +### Workpad (implement) +Update the workpad comment at these milestones during implementation. +**Preferred**: Edit your local `workpad.md` file and call `sync_workpad` with `issue_id`, `file_path`, and `comment_id` (from the investigate stage). +**Fallback** (if `sync_workpad` is unavailable): +1. Search for the existing workpad comment (body starts with `## Workpad`) using `linear_graphql`: + ```graphql + query { issue(id: "{{ issue.id }}") { comments { nodes { id body } } } } + ``` +2. Update it using `commentUpdate` with the comment's `id`. +3. At each milestone, update the relevant sections: + - **After starting implementation**: Check off Plan items as you complete them. + - **After implementation is done**: Add a Notes entry (e.g., `- Implementation complete. PR # opened.`), update Validation with actual commands run. + - **After all tests pass**: Check off Acceptance Criteria items, add a Notes entry confirming validation. +4. Do NOT update the workpad after every small code change — only at the milestones above. +5. 
If no workpad comment exists (e.g., investigation stage was skipped), create one using the template from the investigate stage instructions. + +10. **If your changes are app-touching** (UI, API responses visible to users, frontend assets), capture a screenshot after validation passes and embed it in the workpad: + - Take a screenshot (e.g., `npx playwright screenshot` or `curl` the endpoint and save the response). + - Upload it using the fileUpload flow described in the **Media in Workpads** section. + - Add the image to the workpad comment under Notes: `![screenshot after validation](assetUrl)`. + - Skip this step for non-visual changes (library code, configs, internal refactors). + +## Completion Signals +When you are done: +- If all verify commands pass and PR is created: output `[STAGE_COMPLETE]` +- If you cannot resolve a verify failure after 3 attempts: output `[STAGE_FAILED: verify]` with the failing command and output +- If the spec is ambiguous or contradictory: output `[STAGE_FAILED: spec]` with an explanation +- If you hit infrastructure issues (API limits, network errors): output `[STAGE_FAILED: infra]` with details +{% endif %} + +{% if stageName == "review" %} +## Stage: Review +You are a review agent. Load and execute the /pipeline-review skill. + +The PR for this issue is on the current branch. The issue description contains the frozen spec. The PR body contains Tool Output and SAST Output sections from the implementation agent. + +If all findings are clean or only P3/theoretical: output `[STAGE_COMPLETE]` +If surviving P1/P2 findings exist: post them as a `## Review Findings` comment on the Linear issue, then output `[STAGE_FAILED: review]` with a one-line summary. +{% endif %} + +{% if stageName == "merge" %} +## Stage: Merge +You are in the MERGE stage. The PR has been reviewed and approved. + +### Merge Queue Context +This repo uses GitHub's merge queue. 
When you run `gh pr merge`, GitHub will: +- **If checks passed**: Add the PR to the merge queue. You'll see: `"✓ Pull request ...#N will be added to the merge queue for main when ready"` +- **If checks pending**: Enable auto-merge. You'll see: `"✓ Pull request ...#N will be automatically merged via squash when all requirements are met"` + +In BOTH cases, the merge is not immediate — GitHub queues it, rebases, runs CI on the rebased version, then merges. This is normal behavior. Do NOT interpret it as a failure. + +### Step 1: Merge the PR +Run `gh pr merge --squash --delete-branch --repo $(git remote get-url origin | sed "s|.*github.com/||;s|\.git$||")`. This single command is sufficient. Do NOT: +- Retry the merge command if you see a "merge queue" or "auto-merge" response — that IS success +- Run `gh pr merge` with `--admin` to bypass the queue +- Modify any code in this stage + +### Step 2: Wait for Merge to Complete +After the merge command succeeds, wait for the merge queue to finish: +``` +gh pr checks --watch --required --fail-fast +``` +This blocks until all checks complete (including merge queue CI). Then confirm the PR merged: +``` +gh pr view --json state --jq '.state' +``` +Expected: `MERGED`. If the state is `MERGED`, proceed to workpad update. + +If the merge queue rejects the PR (check failures on rebased code), run `gh pr view --json state,statusCheckRollup` to understand the failure, then output `[STAGE_FAILED: rebase]` — the queue failure means the code doesn't work after rebase against latest main. + +### Step 2b: If Conflicts — Write Rebase Brief and Signal Failure +If the PR has merge conflicts (mergeable is "CONFLICTING" or mergeStateStatus indicates conflicts): +1. Do NOT attempt to resolve conflicts — detect and signal only +2. 
Write `REBASE-BRIEF.md` to the worktree root with the following structure (keep under ~50 lines): + ```markdown + # Rebase Brief + ## Issue: {{ issue.identifier }} — {{ issue.title }} + + ## Conflicting Files + - `path/to/conflicted-file.ts` — nature of conflict if identifiable + + ## Recent Main Commits + (output of git log origin/main --oneline -10 since branch diverged) + + ## Semantic Context + - Any observations about what the conflicting PRs changed (from PR titles/commits) + ``` +3. To identify conflicting files, run `git fetch origin && git merge-tree $(git merge-base HEAD origin/main) HEAD origin/main` or attempt a dry-run merge +4. To get recent main commits, run `git log origin/main --oneline -10` +5. Output `[STAGE_FAILED: rebase]` as the very last line of your final message + +### Workpad (merge) +After merging the PR, update the workpad comment one final time. +**Preferred**: Edit your local `workpad.md` file and call `sync_workpad` with `issue_id`, `file_path`, and `comment_id`. +**Fallback** (if `sync_workpad` is unavailable): +1. Search for the existing workpad comment (body starts with `## Workpad`) using `linear_graphql`: + ```graphql + query { issue(id: "{{ issue.id }}") { comments { nodes { id body } } } } + ``` +2. Update it using `commentUpdate`: + - Check off all remaining Plan and Acceptance Criteria items. + - Add a final Notes entry: `- PR merged. Issue complete.` + +- When you have successfully merged the PR, output the exact text `[STAGE_COMPLETE]` as the very last line of your final message. +{% endif %} +## Scope Discipline + +- If your task requires a capability that doesn't exist in the codebase and isn't specified in the spec, stop and comment what's missing on the issue. Don't scaffold unspecced infrastructure. +- Tests must be runnable against $BASE_URL (no localhost assumptions in committed tests). + +## Workpad Rules + +You maintain a single persistent `## Workpad` comment on the Linear issue. 
This is your structured progress document. + +**Critical rules:** +- **Never create multiple workpad comments.** Always search for an existing comment with `## Workpad` in its body before creating a new one. +- **Update at milestones only** — plan finalized, implementation done, validation complete. Do NOT sync after every minor change. +- **Prefer `sync_workpad` over raw GraphQL.** Write your workpad content to a local `workpad.md` file, then call `sync_workpad` with `issue_id`, `file_path`, and optionally `comment_id` (returned from the first sync). This keeps the workpad body out of your conversation context and saves tokens. Fall back to `linear_graphql` only if `sync_workpad` is unavailable. +- **`linear_graphql` fallback patterns** (use only if `sync_workpad` is unavailable): + - Search comments: `query { issue(id: "") { comments { nodes { id body } } } }` + - Create comment: `mutation { commentCreate(input: { issueId: "", body: "" }) { comment { id } } }` + - Update comment: `mutation { commentUpdate(id: "", input: { body: "" }) { comment { id } } }` +- **Never use `__type` or `__schema` introspection queries** against the Linear API. Use the exact patterns above. + +## Media in Workpads (fileUpload) + +When you capture evidence (screenshots, recordings, logs) during implementation, embed them in the workpad using Linear's `fileUpload` API. This is a 3-step flow: + +**Step 1: Get upload URL** via `linear_graphql`: +```graphql +mutation($filename: String!, $contentType: String!, $size: Int!) 
{ + fileUpload(filename: $filename, contentType: $contentType, size: $size, makePublic: true) { + success + uploadFile { uploadUrl assetUrl headers { key value } } + } +} +``` + +**Step 2: Upload file bytes** using `curl`: +```bash +# Build header flags from the returned headers array +curl -X PUT -H "Content-Type: " \ + -H ": " -H ": " \ + --data-binary @ "" +``` + +**Step 3: Embed in workpad** — add `![description](assetUrl)` to the workpad comment body (either via `sync_workpad` or `commentUpdate`). + +**Supported content types**: `image/png`, `image/jpeg`, `image/gif`, `video/mp4`, `application/pdf`. + +**When to capture media**: Only when evidence adds value — screenshots of UI changes, recordings of interaction flows, or error screenshots for debugging. Do not upload media for non-visual tasks (e.g., pure API or library changes). + +## Documentation Maintenance + +- If you add a new module, API endpoint, or significant abstraction, update the relevant docs/ file and the AGENTS.md Documentation Map entry. If no relevant doc exists, create one following the docs/ conventions (# Title, > Last updated header). +- If a docs/ file you reference during implementation is stale or missing, update/create it as part of your implementation. Include the update in the same PR as your code changes — never in a separate PR. +- If you make a non-obvious architectural decision during implementation, create a design doc in docs/design-docs/ following the ADR format (numbered, with Status line). Add it to the AGENTS.md design docs table. +- When you complete your implementation, update the > Last updated date on any docs/ file you modified. +- Do not update docs/generated/ files — those are auto-generated and will be overwritten. +- Commit doc updates in the same PR as code changes, not separately. 
diff --git a/pipeline-config/workflows/WORKFLOW-toys.md b/pipeline-config/workflows/WORKFLOW-toys.md new file mode 100644 index 00000000..729748a8 --- /dev/null +++ b/pipeline-config/workflows/WORKFLOW-toys.md @@ -0,0 +1,480 @@ +--- +tracker: + kind: linear + api_key: $LINEAR_API_KEY + project_slug: 28f6f9f2c1a3 + active_states: + - Todo + - In Progress + - In Review + - Blocked + - Resume + terminal_states: + - Done + - Cancelled + +escalation_state: Blocked + +polling: + interval_ms: 30000 + +workspace: + root: ./workspaces + +agent: + max_concurrent_agents: 1 + max_turns: 30 + max_retry_backoff_ms: 300000 + +codex: + stall_timeout_ms: 1800000 + +runner: + kind: claude-code + model: claude-sonnet-4-6 + +hooks: + after_create: | + set -euo pipefail + if [ -z "${REPO_URL:-}" ]; then + echo "ERROR: REPO_URL environment variable is not set" >&2 + exit 1 + fi + echo "Cloning $REPO_URL into workspace..." + git clone --depth 1 "$REPO_URL" . + if [ -f package.json ]; then + if [ -f bun.lock ]; then + bun install --frozen-lockfile + elif [ -f pnpm-lock.yaml ]; then + pnpm install --frozen-lockfile + elif [ -f yarn.lock ]; then + yarn install --frozen-lockfile + else + npm install + fi + fi + # --- Build code graph (best-effort) --- + if command -v code-review-graph >/dev/null 2>&1; then + echo "Building code review graph..." + code-review-graph build --repo . || echo "WARNING: code-review-graph build failed, continuing without graph" >&2 + else + echo "WARNING: code-review-graph not installed, skipping graph build" >&2 + fi + echo "Workspace setup complete." + before_run: | + set -euo pipefail + echo "Syncing workspace with upstream..." + + # --- Git lock handling --- + wait_for_git_lock() { + local attempt=0 + while [ -f .git/index.lock ] && [ $attempt -lt 6 ]; do + echo "WARNING: .git/index.lock exists, waiting 5s (attempt $((attempt+1))/6)..." 
>&2 + sleep 5 + attempt=$((attempt+1)) + done + if [ -f .git/index.lock ]; then + echo "WARNING: .git/index.lock still exists after 30s, removing stale lock" >&2 + rm -f .git/index.lock + fi + } + + # --- Git fetch with retry --- + fetch_ok=false + for attempt in 1 2 3; do + wait_for_git_lock + if git fetch origin 2>/dev/null; then + fetch_ok=true + break + fi + echo "WARNING: git fetch failed (attempt $attempt/3), retrying in 2s..." >&2 + sleep 2 + done + if [ "$fetch_ok" = false ]; then + echo "WARNING: git fetch failed after 3 attempts, continuing with stale refs" >&2 + fi + + # --- Rebase (best-effort) --- + CURRENT_BRANCH=$(git rev-parse --abbrev-ref HEAD 2>/dev/null || echo "unknown") + if [ "$CURRENT_BRANCH" = "main" ] || [ "$CURRENT_BRANCH" = "master" ]; then + echo "On $CURRENT_BRANCH — rebasing onto latest..." + wait_for_git_lock + if ! git rebase "origin/$CURRENT_BRANCH" 2>/dev/null; then + echo "WARNING: Rebase failed, aborting rebase" >&2 + git rebase --abort 2>/dev/null || true + fi + else + echo "On feature branch $CURRENT_BRANCH — skipping rebase, fetch only." + fi + # Import investigation brief into CLAUDE.md if it exists + if [ -f "INVESTIGATION-BRIEF.md" ]; then + if ! grep -q "@INVESTIGATION-BRIEF.md" CLAUDE.md 2>/dev/null; then + echo '' >> CLAUDE.md + echo '@INVESTIGATION-BRIEF.md' >> CLAUDE.md + fi + fi + echo "Workspace synced." + before_remove: | + set -uo pipefail + BRANCH=$(git rev-parse --abbrev-ref HEAD 2>/dev/null || echo "") + if [ -z "$BRANCH" ] || [ "$BRANCH" = "main" ] || [ "$BRANCH" = "master" ] || [ "$BRANCH" = "HEAD" ]; then + exit 0 + fi + echo "Cleaning up branch $BRANCH..." + # Close any open PR for this branch (also deletes the remote branch via --delete-branch) + PR_NUM=$(gh pr list --head "$BRANCH" --state open --json number --jq '.[0].number' 2>/dev/null || echo "") + if [ -n "$PR_NUM" ]; then + echo "Closing PR #$PR_NUM and deleting remote branch..." 
+ gh pr close "$PR_NUM" --delete-branch 2>/dev/null || true + else + # No open PR — just delete the remote branch if it exists + echo "No open PR found, deleting remote branch..." + git push origin --delete "$BRANCH" 2>/dev/null || true + fi + echo "Cleanup complete." + timeout_ms: 120000 + +server: + port: 4328 + +observability: + dashboard_enabled: true + refresh_ms: 5000 + +stages: + initial_stage: investigate + + investigate: + type: agent + runner: claude-code + model: claude-sonnet-4-6 + max_turns: 8 + linear_state: In Progress + mcp_servers: + code-review-graph: + command: uvx + args: + - code-review-graph + - serve + on_complete: implement + + implement: + type: agent + runner: claude-code + model: claude-sonnet-4-6 + max_turns: 30 + mcp_servers: + code-review-graph: + command: uvx + args: + - code-review-graph + - serve + on_complete: review + + review: + type: agent + runner: claude-code + model: claude-opus-4-6 + max_turns: 15 + max_rework: 3 + linear_state: In Review + on_complete: merge + on_rework: implement + + merge: + type: agent + runner: claude-code + model: claude-sonnet-4-6 + max_turns: 5 + on_complete: done + + done: + type: terminal + linear_state: Done +--- + +You are running in headless/unattended mode. Do NOT use interactive skills, slash commands, or plan mode. Do not prompt for user input. Complete your work autonomously. + +You are working on the pipeline-test-1 repo (Hono/Bun Tasks API). This is used for experiments, pipeline testing, and throwaway work under the TOYS team. + +Implement only what your task specifies. If you encounter missing functionality that another task covers, add a TODO comment rather than implementing it. Do not refactor surrounding code or add unsolicited improvements. + +Never hardcode localhost or 127.0.0.1. Use the $BASE_URL environment variable for all URL references. Set BASE_URL=localhost: during local development. 
+ +# {{ issue.identifier }} — {{ issue.title }} + +You are working on Linear issue {{ issue.identifier }}. + +## Issue Description + +{{ issue.description }} + +{% if issue.labels.size > 0 %} +Labels: {{ issue.labels | join: ", " }} +{% endif %} + +{% if stageName == "investigate" %} +## Stage: Investigation +You are in the INVESTIGATE stage. Your job is to analyze the issue and create an implementation plan. + +{% if issue.state == "Resume" %} +## RESUME CONTEXT +This issue was previously blocked. Check the issue comments for a `## Resume Context` comment explaining what changed. Focus your investigation on the blocking reasons and what has been updated. +{% endif %} + +- Read the codebase to understand existing patterns and architecture +- Identify which files need to change and what the approach should be +- Post a comment on the Linear issue (via `gh`) with your investigation findings and proposed implementation plan +- Do NOT implement code, create branches, or open PRs in this stage — investigation only + +### Workpad (investigate) +After completing your investigation, create the workpad comment on this Linear issue. +**Preferred**: Write the workpad content to a local `workpad.md` file and call `sync_workpad` with `issue_id` and `file_path`. Save the returned `comment_id` for future updates. +**Fallback** (if `sync_workpad` is unavailable): +1. First, search for an existing workpad comment using `linear_graphql`: + ```graphql + query { issue(id: "{{ issue.id }}") { comments { nodes { id body } } } } + ``` + Look for a comment whose body starts with `## Workpad`. +2. If no workpad comment exists, create one using `commentCreate`. If one exists, update it using `commentUpdate`. +3. Use this template for the workpad body: + ``` + ## Workpad + **Environment**: :@ + + ### Plan + - [ ] Step 1 derived from issue description + - [ ] Step 2 ... + - [ ] Substep if needed + + ### Acceptance Criteria + - [ ] Criterion from issue requirements + - [ ] ... 
+ + ### Validation + - `` + - `` + + ### Notes + - Investigation complete. Plan posted. + + ### Confusions + (Only add this section if something in the issue was genuinely unclear.) + ``` +4. Fill the Plan and Acceptance Criteria sections from your investigation findings. + +### Required: Structured Map + +After your prose findings, you MUST include a structured map section in the workpad with the following format: + +``` +### Files to Change +- path/to/file.ts:LINE_START-LINE_END — what needs to change and why + +### Read Order +1. path/to/primary.ts (primary change target) +2. path/to/types.ts (type definitions needed) +3. path/to/related.test.ts (test file to update) + +### Key Dependencies +- FunctionX is called from A, B, C +- InterfaceY is used in D, E +``` + +This structured map helps the implementation agent navigate the codebase efficiently without re-reading files you already explored. + +## Investigation Brief + +After posting the workpad, write `INVESTIGATION-BRIEF.md` to the worktree root. This file gives the implement-stage agent a concise orientation without re-reading the codebase. + +Keep the brief under ~200 lines (~4K tokens). Use exactly this structure: + +```markdown +# Investigation Brief +## Issue: [ISSUE-KEY] — [Title] + +## Objective +One-paragraph summary of what needs to be done and why. + +## Relevant Files (ranked by importance) +1. `src/path/to/primary-file.ts` — Main file to modify. [What it does, why it matters] +2. `src/path/to/secondary-file.ts` — Related dependency. [What to know] +3. `tests/path/to/test-file.test.ts` — Existing tests. 
[Coverage notes] + +## Key Code Patterns +- Pattern X is used for Y (see `file.ts:42-67`) +- The codebase uses Z convention for this type of change + +## Architecture Context +- Brief description of relevant subsystem +- Data flow: A → B → C +- Key interfaces/types to be aware of + +## Test Strategy +- Existing test files and what they cover +- Test patterns used (describe/it, vitest, mocking approach) +- Edge cases to cover + +## Gotchas & Constraints +- Don't modify X because Y +- Z is deprecated, use W instead + +## Key Code Excerpts +[2-3 most important code blocks with file path and line numbers] +``` + +## Completion Signals +When you are done: +- If investigation is complete and workpad is posted: output `[STAGE_COMPLETE]` +- If the spec is ambiguous or contradictory: output `[STAGE_FAILED: spec]` with an explanation +- If you hit infrastructure issues (API limits, network errors): output `[STAGE_FAILED: infra]` with details +{% endif %} + +{% if stageName == "implement" %} +## Stage: Implementation +You are in the IMPLEMENT stage. Read INVESTIGATION-BRIEF.md first if it exists in the worktree root. It contains targeted findings from the investigation stage including relevant files, code patterns, architecture context, and test strategy. Use it to skip codebase exploration and go straight to implementation. If the file does not exist, fall back to reading issue comments for the investigation plan. + +{% if reworkCount > 0 %} +## REWORK ATTEMPT {{ reworkCount }} +This is a rework attempt. Read ALL comments on this Linear issue starting with `## Review Findings`. These contain the specific findings you must fix. +- Fix ONLY the identified findings +- Do not modify code outside the affected files unless strictly necessary +- Do not reinterpret the spec +- If a finding conflicts with the spec, output `[STAGE_FAILED: spec]` with an explanation +{% endif %} + +## Implementation Steps + +1. Read any investigation notes from previous comments on this issue. +2. 
Create a feature branch from the issue's suggested branch name{% if issue.branch_name %} (`{{ issue.branch_name }}`){% endif %}, or use `{{ issue.identifier | downcase }}/`. +3. Implement the task per the issue description. +4. Write tests as needed. +5. Run all `# Verify:` commands from the spec. You are not done until every verify command exits 0. +6. Before creating the PR, capture structured tool output: + - Run `npx tsc --noEmit 2>&1` and include output in PR body under `## Tool Output > TypeScript` + - Run `npm test 2>&1` and include summary in PR body under `## Tool Output > Tests` + - Run `semgrep scan --config auto --json 2>&1` (if available) and include raw output in PR body under `## SAST Output` + - Do NOT filter or interpret SAST results — include them verbatim. +7. Commit your changes with message format: `feat({{ issue.identifier }}): `. +8. Open a PR targeting this repo (not its upstream fork parent) via `gh pr create --repo $(git remote get-url origin | sed "s|.*github.com/||;s|\.git$||")` with the issue description in the PR body. Include the Tool Output and SAST Output sections. +9. Link the PR to the Linear issue by including `{{ issue.identifier }}` in the PR title or body. + +### Workpad (implement) +Update the workpad comment at these milestones during implementation. +**Preferred**: Edit your local `workpad.md` file and call `sync_workpad` with `issue_id`, `file_path`, and `comment_id` (from the investigate stage). +**Fallback** (if `sync_workpad` is unavailable): +1. Search for the existing workpad comment (body starts with `## Workpad`) using `linear_graphql`: + ```graphql + query { issue(id: "{{ issue.id }}") { comments { nodes { id body } } } } + ``` +2. Update it using `commentUpdate` with the comment's `id`. +3. At each milestone, update the relevant sections: + - **After starting implementation**: Check off Plan items as you complete them. + - **After implementation is done**: Add a Notes entry (e.g., `- Implementation complete. 
PR # opened.`), update Validation with actual commands run. + - **After all tests pass**: Check off Acceptance Criteria items, add a Notes entry confirming validation. +4. Do NOT update the workpad after every small code change — only at the milestones above. +5. If no workpad comment exists (e.g., investigation stage was skipped), create one using the template from the investigate stage instructions. + +10. After all verify commands pass and before creating the PR, run `/simplify focus on code reuse and efficiency` to check for codebase reuse opportunities and efficiency improvements. If simplify makes changes, re-run verify commands to confirm nothing broke. If tests fail after simplify, revert the simplify changes (`git checkout -- .`) and proceed without them. + +11. **If your changes are app-touching** (UI, API responses visible to users, frontend assets), capture a screenshot after validation passes and embed it in the workpad: + - Take a screenshot (e.g., `npx playwright screenshot` or `curl` the endpoint and save the response). + - Upload it using the fileUpload flow described in the **Media in Workpads** section. + - Add the image to the workpad comment under Notes: `![screenshot after validation](assetUrl)`. + - Skip this step for non-visual changes (library code, configs, internal refactors). + +## Completion Signals +When you are done: +- If all verify commands pass and PR is created: output `[STAGE_COMPLETE]` +- If you cannot resolve a verify failure after 3 attempts: output `[STAGE_FAILED: verify]` with the failing command and output +- If the spec is ambiguous or contradictory: output `[STAGE_FAILED: spec]` with an explanation +- If you hit infrastructure issues (API limits, network errors): output `[STAGE_FAILED: infra]` with details +{% endif %} + +{% if stageName == "review" %} +## Stage: Review +You are a review agent. Load and execute the /pipeline-review skill. + +The PR for this issue is on the current branch. 
The issue description contains the frozen spec. The PR body contains Tool Output and SAST Output sections from the implementation agent. + +If all findings are clean or only P3/theoretical: output `[STAGE_COMPLETE]` +If surviving P1/P2 findings exist: post them as a `## Review Findings` comment on the Linear issue, then output `[STAGE_FAILED: review]` with a one-line summary. +{% endif %} + +{% if stageName == "merge" %} +## Stage: Merge +You are in the MERGE stage. The PR has been reviewed and approved. +- Merge the PR via `gh pr merge --squash --delete-branch --repo $(git remote get-url origin | sed "s|.*github.com/||;s|\.git$||")` +- Verify the merge succeeded on the main branch +- Do NOT modify code in this stage + +### Workpad (merge) +After merging the PR, update the workpad comment one final time. +**Preferred**: Edit your local `workpad.md` file and call `sync_workpad` with `issue_id`, `file_path`, and `comment_id`. +**Fallback** (if `sync_workpad` is unavailable): +1. Search for the existing workpad comment (body starts with `## Workpad`) using `linear_graphql`: + ```graphql + query { issue(id: "{{ issue.id }}") { comments { nodes { id body } } } } + ``` +2. Update it using `commentUpdate`: + - Check off all remaining Plan and Acceptance Criteria items. + - Add a final Notes entry: `- PR merged. Issue complete.` + +- When you have successfully merged the PR, output the exact text `[STAGE_COMPLETE]` as the very last line of your final message. +{% endif %} + +## Scope Discipline + +- If your task requires a capability that doesn't exist in the codebase and isn't specified in the spec, stop and comment what's missing on the issue. Don't scaffold unspecced infrastructure. +- Tests must be runnable against $BASE_URL (no localhost assumptions in committed tests). + +## Workpad Rules + +You maintain a single persistent `## Workpad` comment on the Linear issue. This is your structured progress document. 
+
+**Critical rules:**
+- **Never create multiple workpad comments.** Always search for an existing comment with `## Workpad` in its body before creating a new one.
+- **Update at milestones only** — plan finalized, implementation done, validation complete. Do NOT sync after every minor change.
+- **Prefer `sync_workpad` over raw GraphQL.** Write your workpad content to a local `workpad.md` file, then call `sync_workpad` with `issue_id`, `file_path`, and optionally `comment_id` (returned from the first sync). This keeps the workpad body out of your conversation context and saves tokens. Fall back to `linear_graphql` only if `sync_workpad` is unavailable.
+- **`linear_graphql` fallback patterns** (use only if `sync_workpad` is unavailable):
+  - Search comments: `query { issue(id: "<issue-id>") { comments { nodes { id body } } } }`
+  - Create comment: `mutation { commentCreate(input: { issueId: "<issue-id>", body: "<markdown-body>" }) { comment { id } } }`
+  - Update comment: `mutation { commentUpdate(id: "<comment-id>", input: { body: "<markdown-body>" }) { comment { id } } }`
+- **Never use `__type` or `__schema` introspection queries** against the Linear API. Use the exact patterns above.
+
+## Media in Workpads (fileUpload)
+
+When you capture evidence (screenshots, recordings, logs) during implementation, embed them in the workpad using Linear's `fileUpload` API. This is a 3-step flow:
+
+**Step 1: Get upload URL** via `linear_graphql`:
+```graphql
+mutation($filename: String!, $contentType: String!, $size: Int!) {
+  fileUpload(filename: $filename, contentType: $contentType, size: $size, makePublic: true) {
+    success
+    uploadFile { uploadUrl assetUrl headers { key value } }
+  }
+}
+```
+
+**Step 2: Upload file bytes** using `curl`:
+```bash
+# Build header flags from the returned headers array
+curl -X PUT -H "Content-Type: <content-type>" \
+  -H "<header-key>: <header-value>" -H "<header-key>: <header-value>" \
+  --data-binary @<local-file-path> "<uploadUrl>"
+```
+
+**Step 3: Embed in workpad** — add `![description](assetUrl)` to the workpad comment body (either via `sync_workpad` or `commentUpdate`).
+ +**Supported content types**: `image/png`, `image/jpeg`, `image/gif`, `video/mp4`, `application/pdf`. + +**When to capture media**: Only when evidence adds value — screenshots of UI changes, recordings of interaction flows, or error screenshots for debugging. Do not upload media for non-visual tasks (e.g., pure API or library changes). + +## Documentation Maintenance + +- If you add a new module, API endpoint, or significant abstraction, update the relevant docs/ file and the AGENTS.md Documentation Map entry. If no relevant doc exists, create one following the docs/ conventions (# Title, > Last updated header). +- If a docs/ file you reference during implementation is stale or missing, update/create it as part of your implementation. Include the update in the same PR as your code changes — never in a separate PR. +- If you make a non-obvious architectural decision during implementation, create a design doc in docs/design-docs/ following the ADR format (numbered, with Status line). Add it to the AGENTS.md design docs table. +- When you complete your implementation, update the > Last updated date on any docs/ file you modified. +- Do not update docs/generated/ files — those are auto-generated and will be overwritten. +- Commit doc updates in the same PR as code changes, not separately. 
diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 5356699a..ba446308 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -8,6 +8,30 @@ importers: .: dependencies: + '@ai-sdk/provider': + specifier: ^3.0.8 + version: 3.0.8 + '@google/gemini-cli-core': + specifier: ^0.33.2 + version: 0.33.2(express@5.2.1) + '@google/genai': + specifier: ^1.45.0 + version: 1.45.0(@modelcontextprotocol/sdk@1.27.1(zod@4.3.6)) + '@slack/bolt': + specifier: ^4.6.0 + version: 4.6.0(@types/express@5.0.6) + '@slack/web-api': + specifier: ^7.15.0 + version: 7.15.0 + ai: + specifier: ^6.0.116 + version: 6.0.116(zod@4.3.6) + ai-sdk-provider-claude-code: + specifier: ^3.4.4 + version: 3.4.4(zod@4.3.6) + ai-sdk-provider-gemini-cli: + specifier: ^2.0.1 + version: 2.0.1(@modelcontextprotocol/sdk@1.27.1(zod@3.25.76))(@opentelemetry/core@2.6.0(@opentelemetry/api@1.9.0))(@opentelemetry/resources@2.6.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-metrics@2.6.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.6.0(@opentelemetry/api@1.9.0))(zod@4.3.6) graphql: specifier: ^16.13.1 version: 16.13.1 @@ -32,10 +56,55 @@ importers: version: 5.9.3 vitest: specifier: ^3.0.8 - version: 3.2.4(@types/node@22.19.15)(yaml@2.8.2) + version: 3.2.4(@types/debug@4.1.13)(@types/node@22.19.15)(yaml@2.8.2) packages: + '@a2a-js/sdk@0.3.13': + resolution: {integrity: sha512-BZr0f9JVNQs3GKOM9xINWCh6OKIJWZFPyqqVqTym5mxO2Eemc6I/0zL7zWnljHzGdaf5aZQyQN5xa6PSH62q+A==} + engines: {node: '>=18'} + peerDependencies: + '@bufbuild/protobuf': ^2.10.2 + '@grpc/grpc-js': ^1.11.0 + express: ^4.21.2 || ^5.1.0 + peerDependenciesMeta: + '@bufbuild/protobuf': + optional: true + '@grpc/grpc-js': + optional: true + express: + optional: true + + '@ai-sdk/gateway@3.0.66': + resolution: {integrity: sha512-SIQ0YY0iMuv+07HLsZ+bB990zUJ6S4ujORAh+Jv1V2KGNn73qQKnGO0JBk+w+Res8YqOFSycwDoWcFlQrVxS4A==} + engines: {node: '>=18'} + peerDependencies: + zod: ^3.25.76 || ^4.1.8 + + '@ai-sdk/provider-utils@4.0.19': + resolution: {integrity: 
sha512-3eG55CrSWCu2SXlqq2QCsFjo3+E7+Gmg7i/oRVoSZzIodTuDSfLb3MRje67xE9RFea73Zao7Lm4mADIfUETKGg==} + engines: {node: '>=18'} + peerDependencies: + zod: ^3.25.76 || ^4.1.8 + + '@ai-sdk/provider@3.0.8': + resolution: {integrity: sha512-oGMAgGoQdBXbZqNG0Ze56CHjDZ1IDYOwGYxYjO5KLSlz5HiNQ9udIXsPZ61VWaHGZ5XW/jyjmr6t2xz2jGVwbQ==} + engines: {node: '>=18'} + + '@anthropic-ai/claude-agent-sdk@0.2.76': + resolution: {integrity: sha512-HZxvnT8ZWkzCnQygaYCA0dl8RSUzuVbxE1YG4ecy6vh4nQbTT36CxUxBy+QVdR12pPQluncC0mCOLhI2918Eaw==} + engines: {node: '>=18.0.0'} + peerDependencies: + zod: ^4.0.0 + + '@babel/code-frame@7.29.0': + resolution: {integrity: sha512-9NhCeYjq9+3uxgdtp20LSiJXJvN0FeCtNGpJxuMFZ1Kv3cWUNb6DOhJwUvcVCzKGR66cw4njwM6hrJLqgOwbcw==} + engines: {node: '>=6.9.0'} + + '@babel/helper-validator-identifier@7.28.5': + resolution: {integrity: sha512-qSs4ifwzKJSV39ucNjsvc6WVHs6b7S03sOh2OcHF9UHfVPqWWALUsNUVzhSBiItjRZoLHx7nIarVjqKVusUZ1Q==} + engines: {node: '>=6.9.0'} + '@biomejs/biome@1.9.4': resolution: {integrity: sha512-1rkd7G70+o9KkTn5KLmDYXihGoTaIGO9PIIN2ZB7UJxFrWw04CZHPYiMRjYsaDvVV7hP1dYNRLxSANLaBFGpog==} engines: {node: '>=14.21.3'} @@ -249,775 +318,5396 @@ packages: cpu: [x64] os: [win32] - '@jridgewell/sourcemap-codec@1.5.5': - resolution: {integrity: sha512-cYQ9310grqxueWbl+WuIUIaiUaDcj7WOq5fVhEljNVgRfOUhY9fy2zTvfoqWsnebh8Sl70VScFbICvJnLKB0Og==} + '@google-cloud/common@5.0.2': + resolution: {integrity: sha512-V7bmBKYQyu0eVG2BFejuUjlBt+zrya6vtsKdY+JxMM/dNntPF41vZ9+LhOshEUH01zOHEqBSvI7Dad7ZS6aUeA==} + engines: {node: '>=14.0.0'} - '@rollup/rollup-android-arm-eabi@4.59.0': - resolution: {integrity: sha512-upnNBkA6ZH2VKGcBj9Fyl9IGNPULcjXRlg0LLeaioQWueH30p6IXtJEbKAgvyv+mJaMxSm1l6xwDXYjpEMiLMg==} - cpu: [arm] - os: [android] + '@google-cloud/logging@11.2.1': + resolution: {integrity: sha512-2h9HBJG3OAsvzXmb81qXmaTPfXYU7KJTQUxunoOKFGnY293YQ/eCkW1Y5mHLocwpEqeqQYT/Qvl6Tk+Q7PfStw==} + engines: {node: '>=14.0.0'} - '@rollup/rollup-android-arm64@4.59.0': - resolution: {integrity: 
sha512-hZ+Zxj3SySm4A/DylsDKZAeVg0mvi++0PYVceVyX7hemkw7OreKdCvW2oQ3T1FMZvCaQXqOTHb8qmBShoqk69Q==} - cpu: [arm64] - os: [android] + '@google-cloud/opentelemetry-cloud-monitoring-exporter@0.21.0': + resolution: {integrity: sha512-+lAew44pWt6rA4l8dQ1gGhH7Uo95wZKfq/GBf9aEyuNDDLQ2XppGEEReu6ujesSqTtZ8ueQFt73+7SReSHbwqg==} + engines: {node: '>=18'} + peerDependencies: + '@opentelemetry/api': ^1.9.0 + '@opentelemetry/core': ^2.0.0 + '@opentelemetry/resources': ^2.0.0 + '@opentelemetry/sdk-metrics': ^2.0.0 - '@rollup/rollup-darwin-arm64@4.59.0': - resolution: {integrity: sha512-W2Psnbh1J8ZJw0xKAd8zdNgF9HRLkdWwwdWqubSVk0pUuQkoHnv7rx4GiF9rT4t5DIZGAsConRE3AxCdJ4m8rg==} + '@google-cloud/opentelemetry-cloud-trace-exporter@3.0.0': + resolution: {integrity: sha512-mUfLJBFo+ESbO0dAGboErx2VyZ7rbrHcQvTP99yH/J72dGaPbH2IzS+04TFbTbEd1VW5R9uK3xq2CqawQaG+1Q==} + engines: {node: '>=18'} + peerDependencies: + '@opentelemetry/api': ^1.0.0 + '@opentelemetry/core': ^2.0.0 + '@opentelemetry/resources': ^2.0.0 + '@opentelemetry/sdk-trace-base': ^2.0.0 + + '@google-cloud/opentelemetry-resource-util@3.0.0': + resolution: {integrity: sha512-CGR/lNzIfTKlZoZFfS6CkVzx+nsC9gzy6S8VcyaLegfEJbiPjxbMLP7csyhJTvZe/iRRcQJxSk0q8gfrGqD3/Q==} + engines: {node: '>=18'} + peerDependencies: + '@opentelemetry/core': ^2.0.0 + '@opentelemetry/resources': ^2.0.0 + + '@google-cloud/paginator@5.0.2': + resolution: {integrity: sha512-DJS3s0OVH4zFDB1PzjxAsHqJT6sKVbRwwML0ZBP9PbU7Yebtu/7SWMRzvO2J3nUi9pRNITCfu4LJeooM2w4pjg==} + engines: {node: '>=14.0.0'} + + '@google-cloud/precise-date@4.0.0': + resolution: {integrity: sha512-1TUx3KdaU3cN7nfCdNf+UVqA/PSX29Cjcox3fZZBtINlRrXVTmUkQnCKv2MbBUbCopbK4olAT1IHl76uZyCiVA==} + engines: {node: '>=14.0.0'} + + '@google-cloud/projectify@4.0.0': + resolution: {integrity: sha512-MmaX6HeSvyPbWGwFq7mXdo0uQZLGBYCwziiLIGq5JVX+/bdI3SAq6bP98trV5eTWfLuvsMcIC1YJOF2vfteLFA==} + engines: {node: '>=14.0.0'} + + '@google-cloud/promisify@4.0.0': + resolution: {integrity: 
sha512-Orxzlfb9c67A15cq2JQEyVc7wEsmFBmHjZWZYQMUyJ1qivXyMwdyNOs9odi79hze+2zqdTtu1E19IM/FtqZ10g==} + engines: {node: '>=14'} + + '@google/gemini-cli-core@0.22.4': + resolution: {integrity: sha512-tJXajzxWXkSU8jVfwPG6rEFtUg9Bi3I+YAcTUzLEeaNITHJX+1IV0cVvi3/qguz6dWAnYM0mQ3U9jXvfyvIDPg==} + engines: {node: '>=20'} + + '@google/gemini-cli-core@0.33.2': + resolution: {integrity: sha512-uZJqueJ/W/VgHgnsmA5QTixZBNj61vXuDLmFN0t3WATLqYEM3dGcuPYIOYGHagH4RxIdXXOQ9K2B3b3mTN8Hug==} + engines: {node: '>=20'} + + '@google/genai@1.30.0': + resolution: {integrity: sha512-3MRcgczBFbUat1wIlZoLJ0vCCfXgm7Qxjh59cZi2X08RgWLtm9hKOspzp7TOg1TV2e26/MLxR2GR5yD5GmBV2w==} + engines: {node: '>=20.0.0'} + peerDependencies: + '@modelcontextprotocol/sdk': ^1.20.1 + peerDependenciesMeta: + '@modelcontextprotocol/sdk': + optional: true + + '@google/genai@1.45.0': + resolution: {integrity: sha512-+sNRWhKiRibVgc4OKi7aBJJ0A7RcoVD8tGG+eFkqxAWRjASDW+ktS9lLwTDnAxZICzCVoeAdu8dYLJVTX60N9w==} + engines: {node: '>=20.0.0'} + peerDependencies: + '@modelcontextprotocol/sdk': ^1.25.2 + peerDependenciesMeta: + '@modelcontextprotocol/sdk': + optional: true + + '@grpc/grpc-js@1.14.3': + resolution: {integrity: sha512-Iq8QQQ/7X3Sac15oB6p0FmUg/klxQvXLeileoqrTRGJYLV+/9tubbr9ipz0GKHjmXVsgFPo/+W+2cA8eNcR+XA==} + engines: {node: '>=12.10.0'} + + '@grpc/proto-loader@0.7.15': + resolution: {integrity: sha512-tMXdRCfYVixjuFK+Hk0Q1s38gV9zDiDJfWL3h1rv4Qc39oILCu1TRTDt7+fGUI8K4G1Fj125Hx/ru3azECWTyQ==} + engines: {node: '>=6'} + hasBin: true + + '@grpc/proto-loader@0.8.0': + resolution: {integrity: sha512-rc1hOQtjIWGxcxpb9aHAfLpIctjEnsDehj0DAiVfBlmT84uvR0uUtN2hEi/ecvWVjXUGf5qPF4qEgiLOx1YIMQ==} + engines: {node: '>=6'} + hasBin: true + + '@hono/node-server@1.19.11': + resolution: {integrity: sha512-dr8/3zEaB+p0D2n/IUrlPF1HZm586qgJNXK1a9fhg/PzdtkK7Ksd5l312tJX2yBuALqDYBlG20QEbayqPyxn+g==} + engines: {node: '>=18.14.1'} + peerDependencies: + hono: ^4 + + '@iarna/toml@2.2.5': + resolution: {integrity: 
sha512-trnsAYxU3xnS1gPHPyU961coFyLkh4gAD/0zQ5mymY4yOZ+CYvsPqUbOFSw0aDM4y0tV7tiFxL/1XfXPNC6IPg==} + + '@img/sharp-darwin-arm64@0.34.5': + resolution: {integrity: sha512-imtQ3WMJXbMY4fxb/Ndp6HBTNVtWCUI0WdobyheGf5+ad6xX8VIDO8u2xE4qc/fr08CKG/7dDseFtn6M6g/r3w==} + engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} cpu: [arm64] os: [darwin] - '@rollup/rollup-darwin-x64@4.59.0': - resolution: {integrity: sha512-ZW2KkwlS4lwTv7ZVsYDiARfFCnSGhzYPdiOU4IM2fDbL+QGlyAbjgSFuqNRbSthybLbIJ915UtZBtmuLrQAT/w==} + '@img/sharp-darwin-x64@0.34.5': + resolution: {integrity: sha512-YNEFAF/4KQ/PeW0N+r+aVVsoIY0/qxxikF2SWdp+NRkmMB7y9LBZAVqQ4yhGCm/H3H270OSykqmQMKLBhBJDEw==} + engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} cpu: [x64] os: [darwin] - '@rollup/rollup-freebsd-arm64@4.59.0': - resolution: {integrity: sha512-EsKaJ5ytAu9jI3lonzn3BgG8iRBjV4LxZexygcQbpiU0wU0ATxhNVEpXKfUa0pS05gTcSDMKpn3Sx+QB9RlTTA==} + '@img/sharp-libvips-darwin-arm64@1.2.4': + resolution: {integrity: sha512-zqjjo7RatFfFoP0MkQ51jfuFZBnVE2pRiaydKJ1G/rHZvnsrHAOcQALIi9sA5co5xenQdTugCvtb1cuf78Vf4g==} cpu: [arm64] - os: [freebsd] + os: [darwin] - '@rollup/rollup-freebsd-x64@4.59.0': - resolution: {integrity: sha512-d3DuZi2KzTMjImrxoHIAODUZYoUUMsuUiY4SRRcJy6NJoZ6iIqWnJu9IScV9jXysyGMVuW+KNzZvBLOcpdl3Vg==} + '@img/sharp-libvips-darwin-x64@1.2.4': + resolution: {integrity: sha512-1IOd5xfVhlGwX+zXv2N93k0yMONvUlANylbJw1eTah8K/Jtpi15KC+WSiaX/nBmbm2HxRM1gZ0nSdjSsrZbGKg==} cpu: [x64] - os: [freebsd] + os: [darwin] - '@rollup/rollup-linux-arm-gnueabihf@4.59.0': - resolution: {integrity: sha512-t4ONHboXi/3E0rT6OZl1pKbl2Vgxf9vJfWgmUoCEVQVxhW6Cw/c8I6hbbu7DAvgp82RKiH7TpLwxnJeKv2pbsw==} - cpu: [arm] + '@img/sharp-libvips-linux-arm64@1.2.4': + resolution: {integrity: sha512-excjX8DfsIcJ10x1Kzr4RcWe1edC9PquDRRPx3YVCvQv+U5p7Yin2s32ftzikXojb1PIFc/9Mt28/y+iRklkrw==} + cpu: [arm64] os: [linux] libc: [glibc] - '@rollup/rollup-linux-arm-musleabihf@4.59.0': - resolution: {integrity: 
sha512-CikFT7aYPA2ufMD086cVORBYGHffBo4K8MQ4uPS/ZnY54GKj36i196u8U+aDVT2LX4eSMbyHtyOh7D7Zvk2VvA==} + '@img/sharp-libvips-linux-arm@1.2.4': + resolution: {integrity: sha512-bFI7xcKFELdiNCVov8e44Ia4u2byA+l3XtsAj+Q8tfCwO6BQ8iDojYdvoPMqsKDkuoOo+X6HZA0s0q11ANMQ8A==} cpu: [arm] os: [linux] - libc: [musl] + libc: [glibc] - '@rollup/rollup-linux-arm64-gnu@4.59.0': - resolution: {integrity: sha512-jYgUGk5aLd1nUb1CtQ8E+t5JhLc9x5WdBKew9ZgAXg7DBk0ZHErLHdXM24rfX+bKrFe+Xp5YuJo54I5HFjGDAA==} - cpu: [arm64] + '@img/sharp-libvips-linux-x64@1.2.4': + resolution: {integrity: sha512-tJxiiLsmHc9Ax1bz3oaOYBURTXGIRDODBqhveVHonrHJ9/+k89qbLl0bcJns+e4t4rvaNBxaEZsFtSfAdquPrw==} + cpu: [x64] os: [linux] libc: [glibc] - '@rollup/rollup-linux-arm64-musl@4.59.0': - resolution: {integrity: sha512-peZRVEdnFWZ5Bh2KeumKG9ty7aCXzzEsHShOZEFiCQlDEepP1dpUl/SrUNXNg13UmZl+gzVDPsiCwnV1uI0RUA==} + '@img/sharp-libvips-linuxmusl-arm64@1.2.4': + resolution: {integrity: sha512-FVQHuwx1IIuNow9QAbYUzJ+En8KcVm9Lk5+uGUQJHaZmMECZmOlix9HnH7n1TRkXMS0pGxIJokIVB9SuqZGGXw==} cpu: [arm64] os: [linux] libc: [musl] - '@rollup/rollup-linux-loong64-gnu@4.59.0': - resolution: {integrity: sha512-gbUSW/97f7+r4gHy3Jlup8zDG190AuodsWnNiXErp9mT90iCy9NKKU0Xwx5k8VlRAIV2uU9CsMnEFg/xXaOfXg==} - cpu: [loong64] - os: [linux] - libc: [glibc] - - '@rollup/rollup-linux-loong64-musl@4.59.0': - resolution: {integrity: sha512-yTRONe79E+o0FWFijasoTjtzG9EBedFXJMl888NBEDCDV9I2wGbFFfJQQe63OijbFCUZqxpHz1GzpbtSFikJ4Q==} - cpu: [loong64] + '@img/sharp-libvips-linuxmusl-x64@1.2.4': + resolution: {integrity: sha512-+LpyBk7L44ZIXwz/VYfglaX/okxezESc6UxDSoyo2Ks6Jxc4Y7sGjpgU9s4PMgqgjj1gZCylTieNamqA1MF7Dg==} + cpu: [x64] os: [linux] libc: [musl] - '@rollup/rollup-linux-ppc64-gnu@4.59.0': - resolution: {integrity: sha512-sw1o3tfyk12k3OEpRddF68a1unZ5VCN7zoTNtSn2KndUE+ea3m3ROOKRCZxEpmT9nsGnogpFP9x6mnLTCaoLkA==} - cpu: [ppc64] + '@img/sharp-linux-arm64@0.34.5': + resolution: {integrity: 
sha512-bKQzaJRY/bkPOXyKx5EVup7qkaojECG6NLYswgktOZjaXecSAeCWiZwwiFf3/Y+O1HrauiE3FVsGxFg8c24rZg==} + engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} + cpu: [arm64] os: [linux] libc: [glibc] - '@rollup/rollup-linux-ppc64-musl@4.59.0': - resolution: {integrity: sha512-+2kLtQ4xT3AiIxkzFVFXfsmlZiG5FXYW7ZyIIvGA7Bdeuh9Z0aN4hVyXS/G1E9bTP/vqszNIN/pUKCk/BTHsKA==} - cpu: [ppc64] - os: [linux] - libc: [musl] - - '@rollup/rollup-linux-riscv64-gnu@4.59.0': - resolution: {integrity: sha512-NDYMpsXYJJaj+I7UdwIuHHNxXZ/b/N2hR15NyH3m2qAtb/hHPA4g4SuuvrdxetTdndfj9b1WOmy73kcPRoERUg==} - cpu: [riscv64] + '@img/sharp-linux-arm@0.34.5': + resolution: {integrity: sha512-9dLqsvwtg1uuXBGZKsxem9595+ujv0sJ6Vi8wcTANSFpwV/GONat5eCkzQo/1O6zRIkh0m/8+5BjrRr7jDUSZw==} + engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} + cpu: [arm] os: [linux] libc: [glibc] - '@rollup/rollup-linux-riscv64-musl@4.59.0': - resolution: {integrity: sha512-nLckB8WOqHIf1bhymk+oHxvM9D3tyPndZH8i8+35p/1YiVoVswPid2yLzgX7ZJP0KQvnkhM4H6QZ5m0LzbyIAg==} - cpu: [riscv64] - os: [linux] - libc: [musl] - - '@rollup/rollup-linux-s390x-gnu@4.59.0': - resolution: {integrity: sha512-oF87Ie3uAIvORFBpwnCvUzdeYUqi2wY6jRFWJAy1qus/udHFYIkplYRW+wo+GRUP4sKzYdmE1Y3+rY5Gc4ZO+w==} - cpu: [s390x] + '@img/sharp-linux-x64@0.34.5': + resolution: {integrity: sha512-MEzd8HPKxVxVenwAa+JRPwEC7QFjoPWuS5NZnBt6B3pu7EG2Ge0id1oLHZpPJdn3OQK+BQDiw9zStiHBTJQQQQ==} + engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} + cpu: [x64] os: [linux] libc: [glibc] - '@rollup/rollup-linux-x64-gnu@4.59.0': - resolution: {integrity: sha512-3AHmtQq/ppNuUspKAlvA8HtLybkDflkMuLK4DPo77DfthRb71V84/c4MlWJXixZz4uruIH4uaa07IqoAkG64fg==} - cpu: [x64] + '@img/sharp-linuxmusl-arm64@0.34.5': + resolution: {integrity: sha512-fprJR6GtRsMt6Kyfq44IsChVZeGN97gTD331weR1ex1c1rypDEABN6Tm2xa1wE6lYb5DdEnk03NZPqA7Id21yg==} + engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} + cpu: [arm64] os: [linux] - libc: [glibc] + libc: [musl] - '@rollup/rollup-linux-x64-musl@4.59.0': - resolution: {integrity: 
sha512-2UdiwS/9cTAx7qIUZB/fWtToJwvt0Vbo0zmnYt7ED35KPg13Q0ym1g442THLC7VyI6JfYTP4PiSOWyoMdV2/xg==} + '@img/sharp-linuxmusl-x64@0.34.5': + resolution: {integrity: sha512-Jg8wNT1MUzIvhBFxViqrEhWDGzqymo3sV7z7ZsaWbZNDLXRJZoRGrjulp60YYtV4wfY8VIKcWidjojlLcWrd8Q==} + engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} cpu: [x64] os: [linux] libc: [musl] - '@rollup/rollup-openbsd-x64@4.59.0': - resolution: {integrity: sha512-M3bLRAVk6GOwFlPTIxVBSYKUaqfLrn8l0psKinkCFxl4lQvOSz8ZrKDz2gxcBwHFpci0B6rttydI4IpS4IS/jQ==} + '@img/sharp-win32-arm64@0.34.5': + resolution: {integrity: sha512-WQ3AgWCWYSb2yt+IG8mnC6Jdk9Whs7O0gxphblsLvdhSpSTtmu69ZG1Gkb6NuvxsNACwiPV6cNSZNzt0KPsw7g==} + engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} + cpu: [arm64] + os: [win32] + + '@img/sharp-win32-x64@0.34.5': + resolution: {integrity: sha512-+29YMsqY2/9eFEiW93eqWnuLcWcufowXewwSNIT6UwZdUUCrM3oFjMWH/Z6/TMmb4hlFenmfAVbpWeup2jryCw==} + engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} cpu: [x64] - os: [openbsd] + os: [win32] - '@rollup/rollup-openharmony-arm64@4.59.0': - resolution: {integrity: sha512-tt9KBJqaqp5i5HUZzoafHZX8b5Q2Fe7UjYERADll83O4fGqJ49O1FsL6LpdzVFQcpwvnyd0i+K/VSwu/o/nWlA==} - cpu: [arm64] - os: [openharmony] + '@isaacs/cliui@9.0.0': + resolution: {integrity: sha512-AokJm4tuBHillT+FpMtxQ60n8ObyXBatq7jD2/JA9dxbDDokKQm8KMht5ibGzLVU9IJDIKK4TPKgMHEYMn3lMg==} + engines: {node: '>=18'} - '@rollup/rollup-win32-arm64-msvc@4.59.0': - resolution: {integrity: sha512-V5B6mG7OrGTwnxaNUzZTDTjDS7F75PO1ae6MJYdiMu60sq0CqN5CVeVsbhPxalupvTX8gXVSU9gq+Rx1/hvu6A==} + '@joshua.litt/get-ripgrep@0.0.3': + resolution: {integrity: sha512-rycdieAKKqXi2bsM7G2ayDiNk5CAX8ZOzsTQsirfOqUKPef04Xw40BWGGyimaOOuvPgLWYt3tPnLLG3TvPXi5Q==} + + '@jridgewell/sourcemap-codec@1.5.5': + resolution: {integrity: sha512-cYQ9310grqxueWbl+WuIUIaiUaDcj7WOq5fVhEljNVgRfOUhY9fy2zTvfoqWsnebh8Sl70VScFbICvJnLKB0Og==} + + '@js-sdsl/ordered-map@4.4.2': + resolution: {integrity: 
sha512-iUKgm52T8HOE/makSxjqoWhe95ZJA1/G1sYsGev2JDKUSS14KAgg1LHb+Ba+IPow0xflbnSkOsZcO08C7w1gYw==} + + '@keyv/serialize@1.1.1': + resolution: {integrity: sha512-dXn3FZhPv0US+7dtJsIi2R+c7qWYiReoEh5zUntWCf4oSpMNib8FDhSoed6m3QyZdx5hK7iLFkYk3rNxwt8vTA==} + + '@kwsites/file-exists@1.1.1': + resolution: {integrity: sha512-m9/5YGR18lIwxSFDwfE3oA7bWuq9kdau6ugN4H2rJeyhFQZcG9AgSHkQtSD15a8WvTgfz9aikZMrKPHvbpqFiw==} + + '@kwsites/promise-deferred@1.1.1': + resolution: {integrity: sha512-GaHYm+c0O9MjZRu0ongGBRbinu8gVAMd2UZjji6jVmqKtZluZnptXGWhz1E8j8D2HJ3f/yMxKAUC0b+57wncIw==} + + '@lvce-editor/verror@1.7.0': + resolution: {integrity: sha512-+LGuAEIC2L7pbvkyAQVWM2Go0dAy+UWEui28g07zNtZsCBhm+gusBK8PNwLJLV5Jay+TyUYuwLIbJdjLLzqEBg==} + + '@lydell/node-pty-darwin-arm64@1.1.0': + resolution: {integrity: sha512-7kFD+owAA61qmhJCtoMbqj3Uvff3YHDiU+4on5F2vQdcMI3MuwGi7dM6MkFG/yuzpw8LF2xULpL71tOPUfxs0w==} cpu: [arm64] - os: [win32] + os: [darwin] - '@rollup/rollup-win32-ia32-msvc@4.59.0': - resolution: {integrity: sha512-UKFMHPuM9R0iBegwzKF4y0C4J9u8C6MEJgFuXTBerMk7EJ92GFVFYBfOZaSGLu6COf7FxpQNqhNS4c4icUPqxA==} - cpu: [ia32] - os: [win32] + '@lydell/node-pty-darwin-x64@1.1.0': + resolution: {integrity: sha512-XZdvqj5FjAMjH8bdp0YfaZjur5DrCIDD1VYiE9EkkYVMDQqRUPHYV3U8BVEQVT9hYfjmpr7dNaELF2KyISWSNA==} + cpu: [x64] + os: [darwin] - '@rollup/rollup-win32-x64-gnu@4.59.0': - resolution: {integrity: sha512-laBkYlSS1n2L8fSo1thDNGrCTQMmxjYY5G0WFWjFFYZkKPjsMBsgJfGf4TLxXrF6RyhI60L8TMOjBMvXiTcxeA==} + '@lydell/node-pty-linux-arm64@1.1.0': + resolution: {integrity: sha512-yyDBmalCfHpLiQMT2zyLcqL2Fay4Xy7rIs8GH4dqKLnEviMvPGOK7LADVkKAsbsyXBSISL3Lt1m1MtxhPH6ckg==} + cpu: [arm64] + os: [linux] + + '@lydell/node-pty-linux-x64@1.1.0': + resolution: {integrity: sha512-NcNqRTD14QT+vXcEuqSSvmWY+0+WUBn2uRE8EN0zKtDpIEr9d+YiFj16Uqds6QfcLCHfZmC+Ls7YzwTaqDnanA==} cpu: [x64] + os: [linux] + + '@lydell/node-pty-win32-arm64@1.1.0': + resolution: {integrity: 
sha512-JOMbCou+0fA7d/m97faIIfIU0jOv8sn2OR7tI45u3AmldKoKoLP8zHY6SAvDDnI3fccO1R2HeR1doVjpS7HM0w==} + cpu: [arm64] os: [win32] - '@rollup/rollup-win32-x64-msvc@4.59.0': - resolution: {integrity: sha512-2HRCml6OztYXyJXAvdDXPKcawukWY2GpR5/nxKp4iBgiO3wcoEGkAaqctIbZcNB6KlUQBIqt8VYkNSj2397EfA==} + '@lydell/node-pty-win32-x64@1.1.0': + resolution: {integrity: sha512-3N56BZ+WDFnUMYRtsrr7Ky2mhWGl9xXcyqR6cexfuCqcz9RNWL+KoXRv/nZylY5dYaXkft4JaR1uVu+roiZDAw==} cpu: [x64] os: [win32] - '@types/chai@5.2.3': - resolution: {integrity: sha512-Mw558oeA9fFbv65/y4mHtXDs9bPnFMZAL/jxdPFUpOHHIXX91mcgEHbS5Lahr+pwZFR8A7GQleRWeI6cGFC2UA==} + '@lydell/node-pty@1.1.0': + resolution: {integrity: sha512-VDD8LtlMTOrPKWMXUAcB9+LTktzuunqrMwkYR1DMRBkS6LQrCt+0/Ws1o2rMml/n3guePpS7cxhHF7Nm5K4iMw==} - '@types/deep-eql@4.0.2': - resolution: {integrity: sha512-c9h9dVVMigMPc4bwTvC5dxqtqJZwQPePsWjPlpSOnojbor6pGqdk541lfA7AqFQr5pB1BRdq0juY9db81BwyFw==} + '@modelcontextprotocol/sdk@1.27.1': + resolution: {integrity: sha512-sr6GbP+4edBwFndLbM60gf07z0FQ79gaExpnsjMGePXqFcSSb7t6iscpjk9DhFhwd+mTEQrzNafGP8/iGGFYaA==} + engines: {node: '>=18'} + peerDependencies: + '@cfworker/json-schema': ^4.1.1 + zod: ^3.25 || ^4.0 + peerDependenciesMeta: + '@cfworker/json-schema': + optional: true - '@types/estree@1.0.8': - resolution: {integrity: sha512-dWHzHa2WqEXI/O1E9OjrocMTKJl2mSrEolh1Iomrv6U+JuNwaHXsXx9bLu5gG7BUWFIN0skIQJQ/L1rIex4X6w==} + '@opentelemetry/api-logs@0.203.0': + resolution: {integrity: sha512-9B9RU0H7Ya1Dx/Rkyc4stuBZSGVQF27WigitInx2QQoj6KUpEFYPKoWjdFTunJYxmXmh17HeBvbMa1EhGyPmqQ==} + engines: {node: '>=8.0.0'} - '@types/node@22.19.15': - resolution: {integrity: sha512-F0R/h2+dsy5wJAUe3tAU6oqa2qbWY5TpNfL/RGmo1y38hiyO1w3x2jPtt76wmuaJI4DQnOBu21cNXQ2STIUUWg==} + '@opentelemetry/api-logs@0.211.0': + resolution: {integrity: sha512-swFdZq8MCdmdR22jTVGQDhwqDzcI4M10nhjXkLr1EsIzXgZBqm4ZlmmcWsg3TSNf+3mzgOiqveXmBLZuDi2Lgg==} + engines: {node: '>=8.0.0'} - '@vitest/expect@3.2.4': - resolution: {integrity: 
sha512-Io0yyORnB6sikFlt8QW5K7slY4OjqNX9jmJQ02QDda8lyM6B5oNgVWoSoKPac8/kgnCUzuHQKrSLtu/uOqqrig==} + '@opentelemetry/api@1.9.0': + resolution: {integrity: sha512-3giAOQvZiH5F9bMlMiv8+GSPMeqg0dbaeo58/0SlA9sxSqZhnUtxzX9/2FzyhS9sWQf5S0GJE0AKBrFqjpeYcg==} + engines: {node: '>=8.0.0'} - '@vitest/mocker@3.2.4': - resolution: {integrity: sha512-46ryTE9RZO/rfDd7pEqFl7etuyzekzEhUbTW3BvmeO/BcCMEgq59BKhek3dXDWgAj4oMK6OZi+vRr1wPW6qjEQ==} + '@opentelemetry/configuration@0.211.0': + resolution: {integrity: sha512-PNsCkzsYQKyv8wiUIsH+loC4RYyblOaDnVASBtKS22hK55ToWs2UP6IsrcfSWWn54wWTvVe2gnfwz67Pvrxf2Q==} + engines: {node: ^18.19.0 || >=20.6.0} peerDependencies: - msw: ^2.4.9 - vite: ^5.0.0 || ^6.0.0 || ^7.0.0-0 - peerDependenciesMeta: - msw: - optional: true - vite: - optional: true + '@opentelemetry/api': ^1.9.0 - '@vitest/pretty-format@3.2.4': - resolution: {integrity: sha512-IVNZik8IVRJRTr9fxlitMKeJeXFFFN0JaB9PHPGQ8NKQbGpfjlTx9zO4RefN8gp7eqjNy8nyK3NZmBzOPeIxtA==} + '@opentelemetry/context-async-hooks@2.0.1': + resolution: {integrity: sha512-XuY23lSI3d4PEqKA+7SLtAgwqIfc6E/E9eAQWLN1vlpC53ybO3o6jW4BsXo1xvz9lYyyWItfQDDLzezER01mCw==} + engines: {node: ^18.19.0 || >=20.6.0} + peerDependencies: + '@opentelemetry/api': '>=1.0.0 <1.10.0' - '@vitest/runner@3.2.4': - resolution: {integrity: sha512-oukfKT9Mk41LreEW09vt45f8wx7DordoWUZMYdY/cyAk7w5TWkTRCNZYF7sX7n2wB7jyGAl74OxgwhPgKaqDMQ==} + '@opentelemetry/context-async-hooks@2.5.0': + resolution: {integrity: sha512-uOXpVX0ZjO7heSVjhheW2XEPrhQAWr2BScDPoZ9UDycl5iuHG+Usyc3AIfG6kZeC1GyLpMInpQ6X5+9n69yOFw==} + engines: {node: ^18.19.0 || >=20.6.0} + peerDependencies: + '@opentelemetry/api': '>=1.0.0 <1.10.0' - '@vitest/snapshot@3.2.4': - resolution: {integrity: sha512-dEYtS7qQP2CjU27QBC5oUOxLE/v5eLkGqPE0ZKEIDGMs4vKWe7IjgLOeauHsR0D5YuuycGRO5oSRXnwnmA78fQ==} + '@opentelemetry/context-async-hooks@2.6.0': + resolution: {integrity: sha512-L8UyDwqpTcbkIK5cgwDRDYDoEhQoj8wp8BwsO19w3LB1Z41yEQm2VJyNfAi9DrLP/YTqXqWpKHyZfR9/tFYo1Q==} + engines: {node: ^18.19.0 
|| >=20.6.0} + peerDependencies: + '@opentelemetry/api': '>=1.0.0 <1.10.0' - '@vitest/spy@3.2.4': - resolution: {integrity: sha512-vAfasCOe6AIK70iP5UD11Ac4siNUNJ9i/9PZ3NKx07sG6sUxeag1LWdNrMWeKKYBLlzuK+Gn65Yd5nyL6ds+nw==} + '@opentelemetry/core@2.0.1': + resolution: {integrity: sha512-MaZk9SJIDgo1peKevlbhP6+IwIiNPNmswNL4AF0WaQJLbHXjr9SrZMgS12+iqr9ToV4ZVosCcc0f8Rg67LXjxw==} + engines: {node: ^18.19.0 || >=20.6.0} + peerDependencies: + '@opentelemetry/api': '>=1.0.0 <1.10.0' - '@vitest/utils@3.2.4': - resolution: {integrity: sha512-fB2V0JFrQSMsCo9HiSq3Ezpdv4iYaXRG1Sx8edX3MwxfyNn83mKiGzOcH+Fkxt4MHxr3y42fQi1oeAInqgX2QA==} + '@opentelemetry/core@2.5.0': + resolution: {integrity: sha512-ka4H8OM6+DlUhSAZpONu0cPBtPPTQKxbxVzC4CzVx5+K4JnroJVBtDzLAMx4/3CDTJXRvVFhpFjtl4SaiTNoyQ==} + engines: {node: ^18.19.0 || >=20.6.0} + peerDependencies: + '@opentelemetry/api': '>=1.0.0 <1.10.0' - assertion-error@2.0.1: - resolution: {integrity: sha512-Izi8RQcffqCeNVgFigKli1ssklIbpHnCYc6AknXGYoB6grJqyeby7jv12JUQgmTAnIDnbck1uxksT4dzN3PWBA==} - engines: {node: '>=12'} + '@opentelemetry/core@2.6.0': + resolution: {integrity: sha512-HLM1v2cbZ4TgYN6KEOj+Bbj8rAKriOdkF9Ed3tG25FoprSiQl7kYc+RRT6fUZGOvx0oMi5U67GoFdT+XUn8zEg==} + engines: {node: ^18.19.0 || >=20.6.0} + peerDependencies: + '@opentelemetry/api': '>=1.0.0 <1.10.0' - cac@6.7.14: - resolution: {integrity: sha512-b6Ilus+c3RrdDk+JhLKUAQfzzgLEPy6wcXqS7f/xe1EETvsDP6GORG7SFuOs6cID5YkqchW/LXZbX5bc8j7ZcQ==} - engines: {node: '>=8'} + '@opentelemetry/exporter-logs-otlp-grpc@0.203.0': + resolution: {integrity: sha512-g/2Y2noc/l96zmM+g0LdeuyYKINyBwN6FJySoU15LHPLcMN/1a0wNk2SegwKcxrRdE7Xsm7fkIR5n6XFe3QpPw==} + engines: {node: ^18.19.0 || >=20.6.0} + peerDependencies: + '@opentelemetry/api': ^1.3.0 - chai@5.3.3: - resolution: {integrity: sha512-4zNhdJD/iOjSH0A05ea+Ke6MU5mmpQcbQsSOkgdaUMJ9zTlDTD/GYlwohmIE2u0gaxHYiVHEn1Fw9mZ/ktJWgw==} - engines: {node: '>=18'} + '@opentelemetry/exporter-logs-otlp-grpc@0.211.0': + resolution: {integrity: 
sha512-UhOoWENNqyaAMP/dL1YXLkXt6ZBtovkDDs1p4rxto9YwJX1+wMjwg+Obfyg2kwpcMoaiIFT3KQIcLNW8nNGNfQ==} + engines: {node: ^18.19.0 || >=20.6.0} + peerDependencies: + '@opentelemetry/api': ^1.3.0 - check-error@2.1.3: - resolution: {integrity: sha512-PAJdDJusoxnwm1VwW07VWwUN1sl7smmC3OKggvndJFadxxDRyFJBX/ggnu/KE4kQAB7a3Dp8f/YXC1FlUprWmA==} - engines: {node: '>= 16'} + '@opentelemetry/exporter-logs-otlp-http@0.203.0': + resolution: {integrity: sha512-s0hys1ljqlMTbXx2XiplmMJg9wG570Z5lH7wMvrZX6lcODI56sG4HL03jklF63tBeyNwK2RV1/ntXGo3HgG4Qw==} + engines: {node: ^18.19.0 || >=20.6.0} + peerDependencies: + '@opentelemetry/api': ^1.3.0 - commander@10.0.1: - resolution: {integrity: sha512-y4Mg2tXshplEbSGzx7amzPwKKOCGuoSRP/CjEdwwk0FOGlUbq6lKuoyDZTNZkmxHdJtp54hdfY/JUrdL7Xfdug==} - engines: {node: '>=14'} + '@opentelemetry/exporter-logs-otlp-http@0.211.0': + resolution: {integrity: sha512-c118Awf1kZirHkqxdcF+rF5qqWwNjJh+BB1CmQvN9AQHC/DUIldy6dIkJn3EKlQnQ3HmuNRKc/nHHt5IusN7mA==} + engines: {node: ^18.19.0 || >=20.6.0} + peerDependencies: + '@opentelemetry/api': ^1.3.0 - debug@4.4.3: - resolution: {integrity: sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA==} - engines: {node: '>=6.0'} + '@opentelemetry/exporter-logs-otlp-proto@0.203.0': + resolution: {integrity: sha512-nl/7S91MXn5R1aIzoWtMKGvqxgJgepB/sH9qW0rZvZtabnsjbf8OQ1uSx3yogtvLr0GzwD596nQKz2fV7q2RBw==} + engines: {node: ^18.19.0 || >=20.6.0} peerDependencies: - supports-color: '*' - peerDependenciesMeta: - supports-color: - optional: true + '@opentelemetry/api': ^1.3.0 - deep-eql@5.0.2: - resolution: {integrity: sha512-h5k/5U50IJJFpzfL6nO9jaaumfjO/f2NjK/oYB2Djzm4p9L+3T9qWpZqZ2hAbLPuuYq9wrU08WQyBTL5GbPk5Q==} - engines: {node: '>=6'} + '@opentelemetry/exporter-logs-otlp-proto@0.211.0': + resolution: {integrity: sha512-kMvfKMtY5vJDXeLnwhrZMEwhZ2PN8sROXmzacFU/Fnl4Z79CMrOaL7OE+5X3SObRYlDUa7zVqaXp9ZetYCxfDQ==} + engines: {node: ^18.19.0 || >=20.6.0} + peerDependencies: + '@opentelemetry/api': 
^1.3.0 - es-module-lexer@1.7.0: - resolution: {integrity: sha512-jEQoCwk8hyb2AZziIOLhDqpm5+2ww5uIE6lkO/6jcOCusfk6LhMHpXXfBLXTZ7Ydyt0j4VoUQv6uGNYbdW+kBA==} + '@opentelemetry/exporter-metrics-otlp-grpc@0.203.0': + resolution: {integrity: sha512-FCCj9nVZpumPQSEI57jRAA89hQQgONuoC35Lt+rayWY/mzCAc6BQT7RFyFaZKJ2B7IQ8kYjOCPsF/HGFWjdQkQ==} + engines: {node: ^18.19.0 || >=20.6.0} + peerDependencies: + '@opentelemetry/api': ^1.3.0 - esbuild@0.27.3: - resolution: {integrity: sha512-8VwMnyGCONIs6cWue2IdpHxHnAjzxnw2Zr7MkVxB2vjmQ2ivqGFb4LEG3SMnv0Gb2F/G/2yA8zUaiL1gywDCCg==} - engines: {node: '>=18'} - hasBin: true + '@opentelemetry/exporter-metrics-otlp-grpc@0.211.0': + resolution: {integrity: sha512-D/U3G8L4PzZp8ot5hX9wpgbTymgtLZCiwR7heMe4LsbGV4OdctS1nfyvaQHLT6CiGZ6FjKc1Vk9s6kbo9SWLXQ==} + engines: {node: ^18.19.0 || >=20.6.0} + peerDependencies: + '@opentelemetry/api': ^1.3.0 - estree-walker@3.0.3: - resolution: {integrity: sha512-7RUKfXgSMMkzt6ZuXmqapOurLGPPfgj6l9uRZ7lRGolvk0y2yocc35LdcxKC5PQZdn2DMqioAQ2NoWcrTKmm6g==} + '@opentelemetry/exporter-metrics-otlp-http@0.203.0': + resolution: {integrity: sha512-HFSW10y8lY6BTZecGNpV3GpoSy7eaO0Z6GATwZasnT4bEsILp8UJXNG5OmEsz4SdwCSYvyCbTJdNbZP3/8LGCQ==} + engines: {node: ^18.19.0 || >=20.6.0} + peerDependencies: + '@opentelemetry/api': ^1.3.0 + + '@opentelemetry/exporter-metrics-otlp-http@0.211.0': + resolution: {integrity: sha512-lfHXElPAoDSPpPO59DJdN5FLUnwi1wxluLTWQDayqrSPfWRnluzxRhD+g7rF8wbj1qCz0sdqABl//ug1IZyWvA==} + engines: {node: ^18.19.0 || >=20.6.0} + peerDependencies: + '@opentelemetry/api': ^1.3.0 + + '@opentelemetry/exporter-metrics-otlp-proto@0.203.0': + resolution: {integrity: sha512-OZnhyd9npU7QbyuHXFEPVm3LnjZYifuKpT3kTnF84mXeEQ84pJJZgyLBpU4FSkSwUkt/zbMyNAI7y5+jYTWGIg==} + engines: {node: ^18.19.0 || >=20.6.0} + peerDependencies: + '@opentelemetry/api': ^1.3.0 + + '@opentelemetry/exporter-metrics-otlp-proto@0.211.0': + resolution: {integrity: 
sha512-61iNbffEpyZv/abHaz3BQM3zUtA2kVIDBM+0dS9RK68ML0QFLRGYa50xVMn2PYMToyfszEPEgFC3ypGae2z8FA==} + engines: {node: ^18.19.0 || >=20.6.0} + peerDependencies: + '@opentelemetry/api': ^1.3.0 + + '@opentelemetry/exporter-prometheus@0.203.0': + resolution: {integrity: sha512-2jLuNuw5m4sUj/SncDf/mFPabUxMZmmYetx5RKIMIQyPnl6G6ooFzfeE8aXNRf8YD1ZXNlCnRPcISxjveGJHNg==} + engines: {node: ^18.19.0 || >=20.6.0} + peerDependencies: + '@opentelemetry/api': ^1.3.0 + + '@opentelemetry/exporter-prometheus@0.211.0': + resolution: {integrity: sha512-cD0WleEL3TPqJbvxwz5MVdVJ82H8jl8mvMad4bNU24cB5SH2mRW5aMLDTuV4614ll46R//R3RMmci26mc2L99g==} + engines: {node: ^18.19.0 || >=20.6.0} + peerDependencies: + '@opentelemetry/api': ^1.3.0 + + '@opentelemetry/exporter-trace-otlp-grpc@0.203.0': + resolution: {integrity: sha512-322coOTf81bm6cAA8+ML6A+m4r2xTCdmAZzGNTboPXRzhwPt4JEmovsFAs+grpdarObd68msOJ9FfH3jxM6wqA==} + engines: {node: ^18.19.0 || >=20.6.0} + peerDependencies: + '@opentelemetry/api': ^1.3.0 + + '@opentelemetry/exporter-trace-otlp-grpc@0.211.0': + resolution: {integrity: sha512-eFwx4Gvu6LaEiE1rOd4ypgAiWEdZu7Qzm2QNN2nJqPW1XDeAVH1eNwVcVQl+QK9HR/JCDZ78PZgD7xD/DBDqbw==} + engines: {node: ^18.19.0 || >=20.6.0} + peerDependencies: + '@opentelemetry/api': ^1.3.0 + + '@opentelemetry/exporter-trace-otlp-http@0.203.0': + resolution: {integrity: sha512-ZDiaswNYo0yq/cy1bBLJFe691izEJ6IgNmkjm4C6kE9ub/OMQqDXORx2D2j8fzTBTxONyzusbaZlqtfmyqURPw==} + engines: {node: ^18.19.0 || >=20.6.0} + peerDependencies: + '@opentelemetry/api': ^1.3.0 + + '@opentelemetry/exporter-trace-otlp-http@0.211.0': + resolution: {integrity: sha512-F1Rv3JeMkgS//xdVjbQMrI3+26e5SXC7vXA6trx8SWEA0OUhw4JHB+qeHtH0fJn46eFItrYbL5m8j4qi9Sfaxw==} + engines: {node: ^18.19.0 || >=20.6.0} + peerDependencies: + '@opentelemetry/api': ^1.3.0 + + '@opentelemetry/exporter-trace-otlp-proto@0.203.0': + resolution: {integrity: sha512-1xwNTJ86L0aJmWRwENCJlH4LULMG2sOXWIVw+Szta4fkqKVY50Eo4HoVKKq6U9QEytrWCr8+zjw0q/ZOeXpcAQ==} + engines: {node: ^18.19.0 
|| >=20.6.0} + peerDependencies: + '@opentelemetry/api': ^1.3.0 + + '@opentelemetry/exporter-trace-otlp-proto@0.211.0': + resolution: {integrity: sha512-DkjXwbPiqpcPlycUojzG2RmR0/SIK8Gi9qWO9znNvSqgzrnAIE9x2n6yPfpZ+kWHZGafvsvA1lVXucTyyQa5Kg==} + engines: {node: ^18.19.0 || >=20.6.0} + peerDependencies: + '@opentelemetry/api': ^1.3.0 + + '@opentelemetry/exporter-zipkin@2.0.1': + resolution: {integrity: sha512-a9eeyHIipfdxzCfc2XPrE+/TI3wmrZUDFtG2RRXHSbZZULAny7SyybSvaDvS77a7iib5MPiAvluwVvbGTsHxsw==} + engines: {node: ^18.19.0 || >=20.6.0} + peerDependencies: + '@opentelemetry/api': ^1.0.0 + + '@opentelemetry/exporter-zipkin@2.5.0': + resolution: {integrity: sha512-bk9VJgFgUAzkZzU8ZyXBSWiUGLOM3mZEgKJ1+jsZclhRnAoDNf+YBdq+G9R3cP0+TKjjWad+vVrY/bE/vRR9lA==} + engines: {node: ^18.19.0 || >=20.6.0} + peerDependencies: + '@opentelemetry/api': ^1.0.0 + + '@opentelemetry/instrumentation-http@0.203.0': + resolution: {integrity: sha512-y3uQAcCOAwnO6vEuNVocmpVzG3PER6/YZqbPbbffDdJ9te5NkHEkfSMNzlC3+v7KlE+WinPGc3N7MR30G1HY2g==} + engines: {node: ^18.19.0 || >=20.6.0} + peerDependencies: + '@opentelemetry/api': ^1.3.0 + + '@opentelemetry/instrumentation-http@0.211.0': + resolution: {integrity: sha512-n0IaQ6oVll9PP84SjbOCwDjaJasWRHi6BLsbMLiT6tNj7QbVOkuA5sk/EfZczwI0j5uTKl1awQPivO/ldVtsqA==} + engines: {node: ^18.19.0 || >=20.6.0} + peerDependencies: + '@opentelemetry/api': ^1.3.0 + + '@opentelemetry/instrumentation@0.203.0': + resolution: {integrity: sha512-ke1qyM+3AK2zPuBPb6Hk/GCsc5ewbLvPNkEuELx/JmANeEp6ZjnZ+wypPAJSucTw0wvCGrUaibDSdcrGFoWxKQ==} + engines: {node: ^18.19.0 || >=20.6.0} + peerDependencies: + '@opentelemetry/api': ^1.3.0 + + '@opentelemetry/instrumentation@0.211.0': + resolution: {integrity: sha512-h0nrZEC/zvI994nhg7EgQ8URIHt0uDTwN90r3qQUdZORS455bbx+YebnGeEuFghUT0HlJSrLF4iHw67f+odY+Q==} + engines: {node: ^18.19.0 || >=20.6.0} + peerDependencies: + '@opentelemetry/api': ^1.3.0 + + '@opentelemetry/otlp-exporter-base@0.203.0': + resolution: {integrity: 
sha512-Wbxf7k+87KyvxFr5D7uOiSq/vHXWommvdnNE7vECO3tAhsA2GfOlpWINCMWUEPdHZ7tCXxw6Epp3vgx3jU7llQ==} + engines: {node: ^18.19.0 || >=20.6.0} + peerDependencies: + '@opentelemetry/api': ^1.3.0 + + '@opentelemetry/otlp-exporter-base@0.211.0': + resolution: {integrity: sha512-bp1+63V8WPV+bRI9EQG6E9YID1LIHYSZVbp7f+44g9tRzCq+rtw/o4fpL5PC31adcUsFiz/oN0MdLISSrZDdrg==} + engines: {node: ^18.19.0 || >=20.6.0} + peerDependencies: + '@opentelemetry/api': ^1.3.0 + + '@opentelemetry/otlp-grpc-exporter-base@0.203.0': + resolution: {integrity: sha512-te0Ze1ueJF+N/UOFl5jElJW4U0pZXQ8QklgSfJ2linHN0JJsuaHG8IabEUi2iqxY8ZBDlSiz1Trfv5JcjWWWwQ==} + engines: {node: ^18.19.0 || >=20.6.0} + peerDependencies: + '@opentelemetry/api': ^1.3.0 + + '@opentelemetry/otlp-grpc-exporter-base@0.211.0': + resolution: {integrity: sha512-mR5X+N4SuphJeb7/K7y0JNMC8N1mB6gEtjyTLv+TSAhl0ZxNQzpSKP8S5Opk90fhAqVYD4R0SQSAirEBlH1KSA==} + engines: {node: ^18.19.0 || >=20.6.0} + peerDependencies: + '@opentelemetry/api': ^1.3.0 + + '@opentelemetry/otlp-transformer@0.203.0': + resolution: {integrity: sha512-Y8I6GgoCna0qDQ2W6GCRtaF24SnvqvA8OfeTi7fqigD23u8Jpb4R5KFv/pRvrlGagcCLICMIyh9wiejp4TXu/A==} + engines: {node: ^18.19.0 || >=20.6.0} + peerDependencies: + '@opentelemetry/api': ^1.3.0 + + '@opentelemetry/otlp-transformer@0.211.0': + resolution: {integrity: sha512-julhCJ9dXwkOg9svuuYqqjXLhVaUgyUvO2hWbTxwjvLXX2rG3VtAaB0SzxMnGTuoCZizBT7Xqqm2V7+ggrfCXA==} + engines: {node: ^18.19.0 || >=20.6.0} + peerDependencies: + '@opentelemetry/api': ^1.3.0 + + '@opentelemetry/propagator-b3@2.0.1': + resolution: {integrity: sha512-Hc09CaQ8Tf5AGLmf449H726uRoBNGPBL4bjr7AnnUpzWMvhdn61F78z9qb6IqB737TffBsokGAK1XykFEZ1igw==} + engines: {node: ^18.19.0 || >=20.6.0} + peerDependencies: + '@opentelemetry/api': '>=1.0.0 <1.10.0' + + '@opentelemetry/propagator-b3@2.5.0': + resolution: {integrity: sha512-g10m4KD73RjHrSvUge+sUxUl8m4VlgnGc6OKvo68a4uMfaLjdFU+AULfvMQE/APq38k92oGUxEzBsAZ8RN/YHg==} + engines: {node: ^18.19.0 || >=20.6.0} + 
peerDependencies: + '@opentelemetry/api': '>=1.0.0 <1.10.0' + + '@opentelemetry/propagator-jaeger@2.0.1': + resolution: {integrity: sha512-7PMdPBmGVH2eQNb/AtSJizQNgeNTfh6jQFqys6lfhd6P4r+m/nTh3gKPPpaCXVdRQ+z93vfKk+4UGty390283w==} + engines: {node: ^18.19.0 || >=20.6.0} + peerDependencies: + '@opentelemetry/api': '>=1.0.0 <1.10.0' + + '@opentelemetry/propagator-jaeger@2.5.0': + resolution: {integrity: sha512-t70ErZCncAR/zz5AcGkL0TF25mJiK1FfDPEQCgreyAHZ+mRJ/bNUiCnImIBDlP3mSDXy6N09DbUEKq0ktW98Hg==} + engines: {node: ^18.19.0 || >=20.6.0} + peerDependencies: + '@opentelemetry/api': '>=1.0.0 <1.10.0' + + '@opentelemetry/resource-detector-gcp@0.40.3': + resolution: {integrity: sha512-C796YjBA5P1JQldovApYfFA/8bQwFfpxjUbOtGhn1YZkVTLoNQN+kvBwgALfTPWzug6fWsd0xhn9dzeiUcndag==} + engines: {node: ^18.19.0 || >=20.6.0} + peerDependencies: + '@opentelemetry/api': ^1.0.0 + + '@opentelemetry/resources@2.0.1': + resolution: {integrity: sha512-dZOB3R6zvBwDKnHDTB4X1xtMArB/d324VsbiPkX/Yu0Q8T2xceRthoIVFhJdvgVM2QhGVUyX9tzwiNxGtoBJUw==} + engines: {node: ^18.19.0 || >=20.6.0} + peerDependencies: + '@opentelemetry/api': '>=1.3.0 <1.10.0' + + '@opentelemetry/resources@2.5.0': + resolution: {integrity: sha512-F8W52ApePshpoSrfsSk1H2yJn9aKjCrbpQF1M9Qii0GHzbfVeFUB+rc3X4aggyZD8x9Gu3Slua+s6krmq6Dt8g==} + engines: {node: ^18.19.0 || >=20.6.0} + peerDependencies: + '@opentelemetry/api': '>=1.3.0 <1.10.0' + + '@opentelemetry/resources@2.6.0': + resolution: {integrity: sha512-D4y/+OGe3JSuYUCBxtH5T9DSAWNcvCb/nQWIga8HNtXTVPQn59j0nTBAgaAXxUVBDl40mG3Tc76b46wPlZaiJQ==} + engines: {node: ^18.19.0 || >=20.6.0} + peerDependencies: + '@opentelemetry/api': '>=1.3.0 <1.10.0' + + '@opentelemetry/sdk-logs@0.203.0': + resolution: {integrity: sha512-vM2+rPq0Vi3nYA5akQD2f3QwossDnTDLvKbea6u/A2NZ3XDkPxMfo/PNrDoXhDUD/0pPo2CdH5ce/thn9K0kLw==} + engines: {node: ^18.19.0 || >=20.6.0} + peerDependencies: + '@opentelemetry/api': '>=1.4.0 <1.10.0' + + '@opentelemetry/sdk-logs@0.211.0': + resolution: {integrity: 
sha512-O5nPwzgg2JHzo59kpQTPUOTzFi0Nv5LxryG27QoXBciX3zWM3z83g+SNOHhiQVYRWFSxoWn1JM2TGD5iNjOwdA==} + engines: {node: ^18.19.0 || >=20.6.0} + peerDependencies: + '@opentelemetry/api': '>=1.4.0 <1.10.0' + + '@opentelemetry/sdk-metrics@2.0.1': + resolution: {integrity: sha512-wf8OaJoSnujMAHWR3g+/hGvNcsC16rf9s1So4JlMiFaFHiE4HpIA3oUh+uWZQ7CNuK8gVW/pQSkgoa5HkkOl0g==} + engines: {node: ^18.19.0 || >=20.6.0} + peerDependencies: + '@opentelemetry/api': '>=1.9.0 <1.10.0' + + '@opentelemetry/sdk-metrics@2.5.0': + resolution: {integrity: sha512-BeJLtU+f5Gf905cJX9vXFQorAr6TAfK3SPvTFqP+scfIpDQEJfRaGJWta7sJgP+m4dNtBf9y3yvBKVAZZtJQVA==} + engines: {node: ^18.19.0 || >=20.6.0} + peerDependencies: + '@opentelemetry/api': '>=1.9.0 <1.10.0' + + '@opentelemetry/sdk-metrics@2.6.0': + resolution: {integrity: sha512-CicxWZxX6z35HR83jl+PLgtFgUrKRQ9LCXyxgenMnz5A1lgYWfAog7VtdOvGkJYyQgMNPhXQwkYrDLujk7z1Iw==} + engines: {node: ^18.19.0 || >=20.6.0} + peerDependencies: + '@opentelemetry/api': '>=1.9.0 <1.10.0' + + '@opentelemetry/sdk-node@0.203.0': + resolution: {integrity: sha512-zRMvrZGhGVMvAbbjiNQW3eKzW/073dlrSiAKPVWmkoQzah9wfynpVPeL55f9fVIm0GaBxTLcPeukWGy0/Wj7KQ==} + engines: {node: ^18.19.0 || >=20.6.0} + peerDependencies: + '@opentelemetry/api': '>=1.3.0 <1.10.0' + + '@opentelemetry/sdk-node@0.211.0': + resolution: {integrity: sha512-+s1eGjoqmPCMptNxcJJD4IxbWJKNLOQFNKhpwkzi2gLkEbCj6LzSHJNhPcLeBrBlBLtlSpibM+FuS7fjZ8SSFQ==} + engines: {node: ^18.19.0 || >=20.6.0} + peerDependencies: + '@opentelemetry/api': '>=1.3.0 <1.10.0' + + '@opentelemetry/sdk-trace-base@2.0.1': + resolution: {integrity: sha512-xYLlvk/xdScGx1aEqvxLwf6sXQLXCjk3/1SQT9X9AoN5rXRhkdvIFShuNNmtTEPRBqcsMbS4p/gJLNI2wXaDuQ==} + engines: {node: ^18.19.0 || >=20.6.0} + peerDependencies: + '@opentelemetry/api': '>=1.3.0 <1.10.0' + + '@opentelemetry/sdk-trace-base@2.5.0': + resolution: {integrity: sha512-VzRf8LzotASEyNDUxTdaJ9IRJ1/h692WyArDBInf5puLCjxbICD6XkHgpuudis56EndyS7LYFmtTMny6UABNdQ==} + engines: {node: ^18.19.0 || >=20.6.0} + 
peerDependencies: + '@opentelemetry/api': '>=1.3.0 <1.10.0' + + '@opentelemetry/sdk-trace-base@2.6.0': + resolution: {integrity: sha512-g/OZVkqlxllgFM7qMKqbPV9c1DUPhQ7d4n3pgZFcrnrNft9eJXZM2TNHTPYREJBrtNdRytYyvwjgL5geDKl3EQ==} + engines: {node: ^18.19.0 || >=20.6.0} + peerDependencies: + '@opentelemetry/api': '>=1.3.0 <1.10.0' + + '@opentelemetry/sdk-trace-node@2.0.1': + resolution: {integrity: sha512-UhdbPF19pMpBtCWYP5lHbTogLWx9N0EBxtdagvkn5YtsAnCBZzL7SjktG+ZmupRgifsHMjwUaCCaVmqGfSADmA==} + engines: {node: ^18.19.0 || >=20.6.0} + peerDependencies: + '@opentelemetry/api': '>=1.0.0 <1.10.0' + + '@opentelemetry/sdk-trace-node@2.5.0': + resolution: {integrity: sha512-O6N/ejzburFm2C84aKNrwJVPpt6HSTSq8T0ZUMq3xT2XmqT4cwxUItcL5UWGThYuq8RTcbH8u1sfj6dmRci0Ow==} + engines: {node: ^18.19.0 || >=20.6.0} + peerDependencies: + '@opentelemetry/api': '>=1.0.0 <1.10.0' + + '@opentelemetry/sdk-trace-node@2.6.0': + resolution: {integrity: sha512-YhswtasmsbIGEFvLGvR9p/y3PVRTfFf+mgY8van4Ygpnv4sA3vooAjvh+qAn9PNWxs4/IwGGqiQS0PPsaRJ0vQ==} + engines: {node: ^18.19.0 || >=20.6.0} + peerDependencies: + '@opentelemetry/api': '>=1.0.0 <1.10.0' + + '@opentelemetry/semantic-conventions@1.40.0': + resolution: {integrity: sha512-cifvXDhcqMwwTlTK04GBNeIe7yyo28Mfby85QXFe1Yk8nmi36Ab/5UQwptOx84SsoGNRg+EVSjwzfSZMy6pmlw==} + engines: {node: '>=14'} + + '@protobufjs/aspromise@1.1.2': + resolution: {integrity: sha512-j+gKExEuLmKwvz3OgROXtrJ2UG2x8Ch2YZUxahh+s1F2HZ+wAceUNLkvy6zKCPVRkU++ZWQrdxsUeQXmcg4uoQ==} + + '@protobufjs/base64@1.1.2': + resolution: {integrity: sha512-AZkcAA5vnN/v4PDqKyMR5lx7hZttPDgClv83E//FMNhR2TMcLUhfRUBHCmSl0oi9zMgDDqRUJkSxO3wm85+XLg==} + + '@protobufjs/codegen@2.0.4': + resolution: {integrity: sha512-YyFaikqM5sH0ziFZCN3xDC7zeGaB/d0IUb9CATugHWbd1FRFwWwt4ld4OYMPWu5a3Xe01mGAULCdqhMlPl29Jg==} + + '@protobufjs/eventemitter@1.1.0': + resolution: {integrity: sha512-j9ednRT81vYJ9OfVuXG6ERSTdEL1xVsNgqpkxMsbIabzSo3goCjDIveeGv5d03om39ML71RdmrGNjG5SReBP/Q==} + + '@protobufjs/fetch@1.1.0': + 
resolution: {integrity: sha512-lljVXpqXebpsijW71PZaCYeIcE5on1w5DlQy5WH6GLbFryLUrBD4932W/E2BSpfRJWseIL4v/KPgBFxDOIdKpQ==} + + '@protobufjs/float@1.0.2': + resolution: {integrity: sha512-Ddb+kVXlXst9d+R9PfTIxh1EdNkgoRe5tOX6t01f1lYWOvJnSPDBlG241QLzcyPdoNTsblLUdujGSE4RzrTZGQ==} + + '@protobufjs/inquire@1.1.0': + resolution: {integrity: sha512-kdSefcPdruJiFMVSbn801t4vFK7KB/5gd2fYvrxhuJYg8ILrmn9SKSX2tZdV6V+ksulWqS7aXjBcRXl3wHoD9Q==} + + '@protobufjs/path@1.1.2': + resolution: {integrity: sha512-6JOcJ5Tm08dOHAbdR3GrvP+yUUfkjG5ePsHYczMFLq3ZmMkAD98cDgcT2iA1lJ9NVwFd4tH/iSSoe44YWkltEA==} + + '@protobufjs/pool@1.1.0': + resolution: {integrity: sha512-0kELaGSIDBKvcgS4zkjz1PeddatrjYcmMWOlAuAPwAeccUrPHdUqo/J6LiymHHEiJT5NrF1UVwxY14f+fy4WQw==} + + '@protobufjs/utf8@1.1.0': + resolution: {integrity: sha512-Vvn3zZrhQZkkBE8LSuW3em98c0FwgO4nxzv6OdSxPKJIEKY2bGbHn+mhGIPerzI4twdxaP8/0+06HBpwf345Lw==} + + '@rollup/rollup-android-arm-eabi@4.59.0': + resolution: {integrity: sha512-upnNBkA6ZH2VKGcBj9Fyl9IGNPULcjXRlg0LLeaioQWueH30p6IXtJEbKAgvyv+mJaMxSm1l6xwDXYjpEMiLMg==} + cpu: [arm] + os: [android] + + '@rollup/rollup-android-arm64@4.59.0': + resolution: {integrity: sha512-hZ+Zxj3SySm4A/DylsDKZAeVg0mvi++0PYVceVyX7hemkw7OreKdCvW2oQ3T1FMZvCaQXqOTHb8qmBShoqk69Q==} + cpu: [arm64] + os: [android] + + '@rollup/rollup-darwin-arm64@4.59.0': + resolution: {integrity: sha512-W2Psnbh1J8ZJw0xKAd8zdNgF9HRLkdWwwdWqubSVk0pUuQkoHnv7rx4GiF9rT4t5DIZGAsConRE3AxCdJ4m8rg==} + cpu: [arm64] + os: [darwin] + + '@rollup/rollup-darwin-x64@4.59.0': + resolution: {integrity: sha512-ZW2KkwlS4lwTv7ZVsYDiARfFCnSGhzYPdiOU4IM2fDbL+QGlyAbjgSFuqNRbSthybLbIJ915UtZBtmuLrQAT/w==} + cpu: [x64] + os: [darwin] + + '@rollup/rollup-freebsd-arm64@4.59.0': + resolution: {integrity: sha512-EsKaJ5ytAu9jI3lonzn3BgG8iRBjV4LxZexygcQbpiU0wU0ATxhNVEpXKfUa0pS05gTcSDMKpn3Sx+QB9RlTTA==} + cpu: [arm64] + os: [freebsd] + + '@rollup/rollup-freebsd-x64@4.59.0': + resolution: {integrity: 
sha512-d3DuZi2KzTMjImrxoHIAODUZYoUUMsuUiY4SRRcJy6NJoZ6iIqWnJu9IScV9jXysyGMVuW+KNzZvBLOcpdl3Vg==} + cpu: [x64] + os: [freebsd] + + '@rollup/rollup-linux-arm-gnueabihf@4.59.0': + resolution: {integrity: sha512-t4ONHboXi/3E0rT6OZl1pKbl2Vgxf9vJfWgmUoCEVQVxhW6Cw/c8I6hbbu7DAvgp82RKiH7TpLwxnJeKv2pbsw==} + cpu: [arm] + os: [linux] + libc: [glibc] + + '@rollup/rollup-linux-arm-musleabihf@4.59.0': + resolution: {integrity: sha512-CikFT7aYPA2ufMD086cVORBYGHffBo4K8MQ4uPS/ZnY54GKj36i196u8U+aDVT2LX4eSMbyHtyOh7D7Zvk2VvA==} + cpu: [arm] + os: [linux] + libc: [musl] + + '@rollup/rollup-linux-arm64-gnu@4.59.0': + resolution: {integrity: sha512-jYgUGk5aLd1nUb1CtQ8E+t5JhLc9x5WdBKew9ZgAXg7DBk0ZHErLHdXM24rfX+bKrFe+Xp5YuJo54I5HFjGDAA==} + cpu: [arm64] + os: [linux] + libc: [glibc] + + '@rollup/rollup-linux-arm64-musl@4.59.0': + resolution: {integrity: sha512-peZRVEdnFWZ5Bh2KeumKG9ty7aCXzzEsHShOZEFiCQlDEepP1dpUl/SrUNXNg13UmZl+gzVDPsiCwnV1uI0RUA==} + cpu: [arm64] + os: [linux] + libc: [musl] + + '@rollup/rollup-linux-loong64-gnu@4.59.0': + resolution: {integrity: sha512-gbUSW/97f7+r4gHy3Jlup8zDG190AuodsWnNiXErp9mT90iCy9NKKU0Xwx5k8VlRAIV2uU9CsMnEFg/xXaOfXg==} + cpu: [loong64] + os: [linux] + libc: [glibc] + + '@rollup/rollup-linux-loong64-musl@4.59.0': + resolution: {integrity: sha512-yTRONe79E+o0FWFijasoTjtzG9EBedFXJMl888NBEDCDV9I2wGbFFfJQQe63OijbFCUZqxpHz1GzpbtSFikJ4Q==} + cpu: [loong64] + os: [linux] + libc: [musl] + + '@rollup/rollup-linux-ppc64-gnu@4.59.0': + resolution: {integrity: sha512-sw1o3tfyk12k3OEpRddF68a1unZ5VCN7zoTNtSn2KndUE+ea3m3ROOKRCZxEpmT9nsGnogpFP9x6mnLTCaoLkA==} + cpu: [ppc64] + os: [linux] + libc: [glibc] + + '@rollup/rollup-linux-ppc64-musl@4.59.0': + resolution: {integrity: sha512-+2kLtQ4xT3AiIxkzFVFXfsmlZiG5FXYW7ZyIIvGA7Bdeuh9Z0aN4hVyXS/G1E9bTP/vqszNIN/pUKCk/BTHsKA==} + cpu: [ppc64] + os: [linux] + libc: [musl] + + '@rollup/rollup-linux-riscv64-gnu@4.59.0': + resolution: {integrity: 
sha512-NDYMpsXYJJaj+I7UdwIuHHNxXZ/b/N2hR15NyH3m2qAtb/hHPA4g4SuuvrdxetTdndfj9b1WOmy73kcPRoERUg==} + cpu: [riscv64] + os: [linux] + libc: [glibc] + + '@rollup/rollup-linux-riscv64-musl@4.59.0': + resolution: {integrity: sha512-nLckB8WOqHIf1bhymk+oHxvM9D3tyPndZH8i8+35p/1YiVoVswPid2yLzgX7ZJP0KQvnkhM4H6QZ5m0LzbyIAg==} + cpu: [riscv64] + os: [linux] + libc: [musl] + + '@rollup/rollup-linux-s390x-gnu@4.59.0': + resolution: {integrity: sha512-oF87Ie3uAIvORFBpwnCvUzdeYUqi2wY6jRFWJAy1qus/udHFYIkplYRW+wo+GRUP4sKzYdmE1Y3+rY5Gc4ZO+w==} + cpu: [s390x] + os: [linux] + libc: [glibc] + + '@rollup/rollup-linux-x64-gnu@4.59.0': + resolution: {integrity: sha512-3AHmtQq/ppNuUspKAlvA8HtLybkDflkMuLK4DPo77DfthRb71V84/c4MlWJXixZz4uruIH4uaa07IqoAkG64fg==} + cpu: [x64] + os: [linux] + libc: [glibc] + + '@rollup/rollup-linux-x64-musl@4.59.0': + resolution: {integrity: sha512-2UdiwS/9cTAx7qIUZB/fWtToJwvt0Vbo0zmnYt7ED35KPg13Q0ym1g442THLC7VyI6JfYTP4PiSOWyoMdV2/xg==} + cpu: [x64] + os: [linux] + libc: [musl] + + '@rollup/rollup-openbsd-x64@4.59.0': + resolution: {integrity: sha512-M3bLRAVk6GOwFlPTIxVBSYKUaqfLrn8l0psKinkCFxl4lQvOSz8ZrKDz2gxcBwHFpci0B6rttydI4IpS4IS/jQ==} + cpu: [x64] + os: [openbsd] + + '@rollup/rollup-openharmony-arm64@4.59.0': + resolution: {integrity: sha512-tt9KBJqaqp5i5HUZzoafHZX8b5Q2Fe7UjYERADll83O4fGqJ49O1FsL6LpdzVFQcpwvnyd0i+K/VSwu/o/nWlA==} + cpu: [arm64] + os: [openharmony] + + '@rollup/rollup-win32-arm64-msvc@4.59.0': + resolution: {integrity: sha512-V5B6mG7OrGTwnxaNUzZTDTjDS7F75PO1ae6MJYdiMu60sq0CqN5CVeVsbhPxalupvTX8gXVSU9gq+Rx1/hvu6A==} + cpu: [arm64] + os: [win32] + + '@rollup/rollup-win32-ia32-msvc@4.59.0': + resolution: {integrity: sha512-UKFMHPuM9R0iBegwzKF4y0C4J9u8C6MEJgFuXTBerMk7EJ92GFVFYBfOZaSGLu6COf7FxpQNqhNS4c4icUPqxA==} + cpu: [ia32] + os: [win32] + + '@rollup/rollup-win32-x64-gnu@4.59.0': + resolution: {integrity: sha512-laBkYlSS1n2L8fSo1thDNGrCTQMmxjYY5G0WFWjFFYZkKPjsMBsgJfGf4TLxXrF6RyhI60L8TMOjBMvXiTcxeA==} + cpu: [x64] + os: [win32] + + 
'@rollup/rollup-win32-x64-msvc@4.59.0': + resolution: {integrity: sha512-2HRCml6OztYXyJXAvdDXPKcawukWY2GpR5/nxKp4iBgiO3wcoEGkAaqctIbZcNB6KlUQBIqt8VYkNSj2397EfA==} + cpu: [x64] + os: [win32] + + '@sec-ant/readable-stream@0.4.1': + resolution: {integrity: sha512-831qok9r2t8AlxLko40y2ebgSDhenenCatLVeW/uBtnHPyhHOvG0C7TvfgecV+wHzIm5KUICgzmVpWS+IMEAeg==} + + '@selderee/plugin-htmlparser2@0.11.0': + resolution: {integrity: sha512-P33hHGdldxGabLFjPPpaTxVolMrzrcegejx+0GxjrIb9Zv48D8yAIA/QTDR2dFl7Uz7urX8aX6+5bCZslr+gWQ==} + + '@sindresorhus/is@7.2.0': + resolution: {integrity: sha512-P1Cz1dWaFfR4IR+U13mqqiGsLFf1KbayybWwdd2vfctdV6hDpUkgCY0nKOLLTMSoRd/jJNjtbqzf13K8DCCXQw==} + engines: {node: '>=18'} + + '@sindresorhus/merge-streams@4.0.0': + resolution: {integrity: sha512-tlqY9xq5ukxTUZBmoOp+m61cqwQD5pHJtFY3Mn8CA8ps6yghLH/Hw8UPdqg4OLmFW3IFlcXnQNmo/dh8HzXYIQ==} + engines: {node: '>=18'} + + '@slack/bolt@4.6.0': + resolution: {integrity: sha512-xPgfUs2+OXSugz54Ky07pA890+Qydk22SYToi8uGpXeHSt1JWwFJkRyd/9Vlg5I1AdfdpGXExDpwnbuN9Q/2dQ==} + engines: {node: '>=18', npm: '>=8.6.0'} + peerDependencies: + '@types/express': ^5.0.0 + + '@slack/logger@4.0.1': + resolution: {integrity: sha512-6cmdPrV/RYfd2U0mDGiMK8S7OJqpCTm7enMLRR3edccsPX8j7zXTLnaEF4fhxxJJTAIOil6+qZrnUPTuaLvwrQ==} + engines: {node: '>= 18', npm: '>= 8.6.0'} + + '@slack/oauth@3.0.5': + resolution: {integrity: sha512-exqFQySKhNDptWYSWhvRUJ4/+ndu2gayIy7vg/JfmJq3wGtGdHk531P96fAZyBm5c1Le3yaPYqv92rL4COlU3A==} + engines: {node: '>=18', npm: '>=8.6.0'} + + '@slack/socket-mode@2.0.6': + resolution: {integrity: sha512-Aj5RO3MoYVJ+b2tUjHUXuA3tiIaCUMOf1Ss5tPiz29XYVUi6qNac2A8ulcU1pUPERpXVHTmT1XW6HzQIO74daQ==} + engines: {node: '>= 18', npm: '>= 8.6.0'} + + '@slack/types@2.20.1': + resolution: {integrity: sha512-eWX2mdt1ktpn8+40iiMc404uGrih+2fxiky3zBcPjtXKj6HLRdYlmhrPkJi7JTJm8dpXR6BWVWEDBXtaWMKD6A==} + engines: {node: '>= 12.13.0', npm: '>= 6.12.0'} + + '@slack/web-api@7.15.0': + resolution: {integrity: 
sha512-va7zYIt3QHG1x9M/jqXXRPFMoOVlVSSRHC5YH+DzKYsrz5xUKOA3lR4THsu/Zxha9N1jOndbKFKLtr0WOPW1Vw==} + engines: {node: '>= 18', npm: '>= 8.6.0'} + + '@standard-schema/spec@1.1.0': + resolution: {integrity: sha512-l2aFy5jALhniG5HgqrD6jXLi/rUWrKvqN/qJx6yoJsgKhblVd+iqqU4RCXavm/jPityDo5TCvKMnpjKnOriy0w==} + + '@tootallnate/once@2.0.0': + resolution: {integrity: sha512-XCuKFP5PS55gnMVu3dty8KPatLqUoy/ZYzDzAGCQ8JNFCkLXzmI7vNHCR+XpbZaMWQK/vQubr7PkYq8g470J/A==} + engines: {node: '>= 10'} + + '@types/body-parser@1.19.6': + resolution: {integrity: sha512-HLFeCYgz89uk22N5Qg3dvGvsv46B8GLvKKo1zKG4NybA8U2DiEO3w9lqGg29t/tfLRJpJ6iQxnVw4OnB7MoM9g==} + + '@types/caseless@0.12.5': + resolution: {integrity: sha512-hWtVTC2q7hc7xZ/RLbxapMvDMgUnDvKvMOpKal4DrMyfGBUfB1oKaZlIRr6mJL+If3bAP6sV/QneGzF6tJjZDg==} + + '@types/chai@5.2.3': + resolution: {integrity: sha512-Mw558oeA9fFbv65/y4mHtXDs9bPnFMZAL/jxdPFUpOHHIXX91mcgEHbS5Lahr+pwZFR8A7GQleRWeI6cGFC2UA==} + + '@types/connect@3.4.38': + resolution: {integrity: sha512-K6uROf1LD88uDQqJCktA4yzL1YYAK6NgfsI0v/mTgyPKWsX1CnJ0XPSDhViejru1GcRkLWb8RlzFYJRqGUbaug==} + + '@types/debug@4.1.13': + resolution: {integrity: sha512-KSVgmQmzMwPlmtljOomayoR89W4FynCAi3E8PPs7vmDVPe84hT+vGPKkJfThkmXs0x0jAaa9U8uW8bbfyS2fWw==} + + '@types/deep-eql@4.0.2': + resolution: {integrity: sha512-c9h9dVVMigMPc4bwTvC5dxqtqJZwQPePsWjPlpSOnojbor6pGqdk541lfA7AqFQr5pB1BRdq0juY9db81BwyFw==} + + '@types/estree@1.0.8': + resolution: {integrity: sha512-dWHzHa2WqEXI/O1E9OjrocMTKJl2mSrEolh1Iomrv6U+JuNwaHXsXx9bLu5gG7BUWFIN0skIQJQ/L1rIex4X6w==} + + '@types/express-serve-static-core@5.1.1': + resolution: {integrity: sha512-v4zIMr/cX7/d2BpAEX3KNKL/JrT1s43s96lLvvdTmza1oEvDudCqK9aF/djc/SWgy8Yh0h30TZx5VpzqFCxk5A==} + + '@types/express@5.0.6': + resolution: {integrity: sha512-sKYVuV7Sv9fbPIt/442koC7+IIwK5olP1KWeD88e/idgoJqDm3JV/YUiPwkoKK92ylff2MGxSz1CSjsXelx0YA==} + + '@types/glob@8.1.0': + resolution: {integrity: 
sha512-IO+MJPVhoqz+28h1qLAcBEH2+xHMK6MTyHJc7MTnnYb6wsoLR29POVGJ7LycmVXIqyy/4/2ShP5sUwTXuOwb/w==} + + '@types/html-to-text@9.0.4': + resolution: {integrity: sha512-pUY3cKH/Nm2yYrEmDlPR1mR7yszjGx4DrwPjQ702C4/D5CwHuZTgZdIdwPkRbcuhs7BAh2L5rg3CL5cbRiGTCQ==} + + '@types/http-cache-semantics@4.2.0': + resolution: {integrity: sha512-L3LgimLHXtGkWikKnsPg0/VFx9OGZaC+eN1u4r+OB1XRqH3meBIAVC2zr1WdMH+RHmnRkqliQAOHNJ/E0j/e0Q==} + + '@types/http-errors@2.0.5': + resolution: {integrity: sha512-r8Tayk8HJnX0FztbZN7oVqGccWgw98T/0neJphO91KkmOzug1KkofZURD4UaD5uH8AqcFLfdPErnBod0u71/qg==} + + '@types/jsonwebtoken@9.0.10': + resolution: {integrity: sha512-asx5hIG9Qmf/1oStypjanR7iKTv0gXQ1Ov/jfrX6kS/EO0OFni8orbmGCn0672NHR3kXHwpAwR+B368ZGN/2rA==} + + '@types/long@4.0.2': + resolution: {integrity: sha512-MqTGEo5bj5t157U6fA/BiDynNkn0YknVdh48CMPkTSpFTVmvao5UQmm7uEF6xBEo7qIMAlY/JSleYaE6VOdpaA==} + + '@types/minimatch@5.1.2': + resolution: {integrity: sha512-K0VQKziLUWkVKiRVrx4a40iPaxTUefQmjtkQofBkYRcoaaL/8rhwDWww9qWbrgicNOgnpIsMxyNIUM4+n6dUIA==} + + '@types/ms@2.1.0': + resolution: {integrity: sha512-GsCCIZDE/p3i96vtEqx+7dBUGXrc7zeSK3wwPHIaRThS+9OhWIXRqzs4d6k1SVU8g91DrNRWxWUGhp5KXQb2VA==} + + '@types/node@22.19.15': + resolution: {integrity: sha512-F0R/h2+dsy5wJAUe3tAU6oqa2qbWY5TpNfL/RGmo1y38hiyO1w3x2jPtt76wmuaJI4DQnOBu21cNXQ2STIUUWg==} + + '@types/normalize-package-data@2.4.4': + resolution: {integrity: sha512-37i+OaWTh9qeK4LSHPsyRC7NahnGotNuZvjLSgcPzblpHB3rrCJxAOgI5gCdKm7coonsaX1Of0ILiTcnZjbfxA==} + + '@types/qs@6.15.0': + resolution: {integrity: sha512-JawvT8iBVWpzTrz3EGw9BTQFg3BQNmwERdKE22vlTxawwtbyUSlMppvZYKLZzB5zgACXdXxbD3m1bXaMqP/9ow==} + + '@types/range-parser@1.2.7': + resolution: {integrity: sha512-hKormJbkJqzQGhziax5PItDUTMAM9uE2XXQmM37dyd4hVM+5aVl7oVxMVUiVQn2oCQFN/LKCZdvSM0pFRqbSmQ==} + + '@types/request@2.48.13': + resolution: {integrity: sha512-FGJ6udDNUCjd19pp0Q3iTiDkwhYup7J8hpMW9c4k53NrccQFFWKRho6hvtPPEhnXWKvukfwAlB6DbDz4yhH5Gg==} + + '@types/retry@0.12.0': + resolution: 
{integrity: sha512-wWKOClTTiizcZhXnPY4wikVAwmdYHp8q6DmC+EJUzAMsycb7HB32Kh9RN4+0gExjmPmZSAQjgURXIGATPegAvA==} + + '@types/send@1.2.1': + resolution: {integrity: sha512-arsCikDvlU99zl1g69TcAB3mzZPpxgw0UQnaHeC1Nwb015xp8bknZv5rIfri9xTOcMuaVgvabfIRA7PSZVuZIQ==} + + '@types/serve-static@2.2.0': + resolution: {integrity: sha512-8mam4H1NHLtu7nmtalF7eyBH14QyOASmcxHhSfEoRyr0nP/YdoesEtU+uSRvMe96TW/HPTtkoKqQLl53N7UXMQ==} + + '@types/tough-cookie@4.0.5': + resolution: {integrity: sha512-/Ad8+nIOV7Rl++6f1BdKxFSMgmoqEoYbHRpPcx3JEfv8VRsQe9Z4mCXeJBzxs7mbHY/XOZZuXlRNfhpVPbs6ZA==} + + '@types/ws@8.18.1': + resolution: {integrity: sha512-ThVF6DCVhA8kUGy+aazFQ4kXQ7E1Ty7A3ypFOe0IcJV8O/M511G99AW24irKrW56Wt44yG9+ij8FaqoBGkuBXg==} + + '@types/yauzl@2.10.3': + resolution: {integrity: sha512-oJoftv0LSuaDZE3Le4DbKX+KS9G36NzOeSap90UIK0yMA/NhKJhqlSGtNDORNRaIbQfzjXDrQa0ytJ6mNRGz/Q==} + + '@vercel/oidc@3.1.0': + resolution: {integrity: sha512-Fw28YZpRnA3cAHHDlkt7xQHiJ0fcL+NRcIqsocZQUSmbzeIKRpwttJjik5ZGanXP+vlA4SbTg+AbA3bP363l+w==} + engines: {node: '>= 20'} + + '@vitest/expect@3.2.4': + resolution: {integrity: sha512-Io0yyORnB6sikFlt8QW5K7slY4OjqNX9jmJQ02QDda8lyM6B5oNgVWoSoKPac8/kgnCUzuHQKrSLtu/uOqqrig==} + + '@vitest/mocker@3.2.4': + resolution: {integrity: sha512-46ryTE9RZO/rfDd7pEqFl7etuyzekzEhUbTW3BvmeO/BcCMEgq59BKhek3dXDWgAj4oMK6OZi+vRr1wPW6qjEQ==} + peerDependencies: + msw: ^2.4.9 + vite: ^5.0.0 || ^6.0.0 || ^7.0.0-0 + peerDependenciesMeta: + msw: + optional: true + vite: + optional: true + + '@vitest/pretty-format@3.2.4': + resolution: {integrity: sha512-IVNZik8IVRJRTr9fxlitMKeJeXFFFN0JaB9PHPGQ8NKQbGpfjlTx9zO4RefN8gp7eqjNy8nyK3NZmBzOPeIxtA==} + + '@vitest/runner@3.2.4': + resolution: {integrity: sha512-oukfKT9Mk41LreEW09vt45f8wx7DordoWUZMYdY/cyAk7w5TWkTRCNZYF7sX7n2wB7jyGAl74OxgwhPgKaqDMQ==} + + '@vitest/snapshot@3.2.4': + resolution: {integrity: sha512-dEYtS7qQP2CjU27QBC5oUOxLE/v5eLkGqPE0ZKEIDGMs4vKWe7IjgLOeauHsR0D5YuuycGRO5oSRXnwnmA78fQ==} + + '@vitest/spy@3.2.4': + resolution: 
{integrity: sha512-vAfasCOe6AIK70iP5UD11Ac4siNUNJ9i/9PZ3NKx07sG6sUxeag1LWdNrMWeKKYBLlzuK+Gn65Yd5nyL6ds+nw==} + + '@vitest/utils@3.2.4': + resolution: {integrity: sha512-fB2V0JFrQSMsCo9HiSq3Ezpdv4iYaXRG1Sx8edX3MwxfyNn83mKiGzOcH+Fkxt4MHxr3y42fQi1oeAInqgX2QA==} + + '@xterm/headless@5.5.0': + resolution: {integrity: sha512-5xXB7kdQlFBP82ViMJTwwEc3gKCLGKR/eoxQm4zge7GPBl86tCdI0IdPJjoKd8mUSFXz5V7i/25sfsEkP4j46g==} + + abort-controller@3.0.0: + resolution: {integrity: sha512-h8lQ8tacZYnR3vNQTgibj+tODHI5/+l06Au2Pcriv/Gmet0eaj4TwWH41sO9wnHDiQsEj19q0drzdWdeAHtweg==} + engines: {node: '>=6.5'} + + accepts@2.0.0: + resolution: {integrity: sha512-5cvg6CtKwfgdmVqY1WIiXKc3Q1bkRqGLi+2W/6ao+6Y7gu/RCwRuAhGEzh5B4KlszSuTLgZYuqFqo5bImjNKng==} + engines: {node: '>= 0.6'} + + acorn-import-attributes@1.9.5: + resolution: {integrity: sha512-n02Vykv5uA3eHGM/Z2dQrcD56kL8TyDb2p1+0P83PClMnC/nc+anbQRhIOWnSq4Ke/KvDPrY3C9hDtC/A3eHnQ==} + peerDependencies: + acorn: ^8 + + acorn@8.16.0: + resolution: {integrity: sha512-UVJyE9MttOsBQIDKw1skb9nAwQuR5wuGD3+82K6JgJlm/Y+KI92oNsMNGZCYdDsVtRHSak0pcV5Dno5+4jh9sw==} + engines: {node: '>=0.4.0'} + hasBin: true + + agent-base@6.0.2: + resolution: {integrity: sha512-RZNwNclF7+MS/8bDg70amg32dyeZGZxiDuQmZxKLAlQjr3jGyLx+4Kkk58UO7D2QdgFIQCovuSuZESne6RG6XQ==} + engines: {node: '>= 6.0.0'} + + agent-base@7.1.4: + resolution: {integrity: sha512-MnA+YT8fwfJPgBx3m60MNqakm30XOkyIoH1y6huTQvC0PwZG7ki8NacLBcrPbNoo8vEZy7Jpuk7+jMO+CUovTQ==} + engines: {node: '>= 14'} + + ai-sdk-provider-claude-code@3.4.4: + resolution: {integrity: sha512-iHcup5SHh4Tul1RIi9J+bnpngen8WX66yC3lsz1YlbtwAmRhUEzZUuGKzmFGIN8Pmx9uQrerGfLJdbFxIxKkyw==} + engines: {node: '>=18'} + peerDependencies: + zod: ^4.0.0 + + ai-sdk-provider-gemini-cli@2.0.1: + resolution: {integrity: sha512-v9Oc9irtWalFjODdj6nUFg0ifNJYm6IiWoafNdsJINmgE2k5JC0gEouypPsGoX9RAkIlOsJiE3ujbd+6nUqXxw==} + engines: {node: '>=20'} + peerDependencies: + zod: ^3.0.0 || ^4.0.0 + + ai@6.0.116: + resolution: {integrity: 
sha512-7yM+cTmyRLeNIXwt4Vj+mrrJgVQ9RMIW5WO0ydoLoYkewIvsMcvUmqS4j2RJTUXaF1HphwmSKUMQ/HypNRGOmA==} + engines: {node: '>=18'} + peerDependencies: + zod: ^3.25.76 || ^4.1.8 + + ajv-formats@3.0.1: + resolution: {integrity: sha512-8iUql50EUR+uUcdRQ3HDqa6EVyo3docL8g5WJ3FNcWmu62IbkGUue/pEyLBW8VGKKucTPgqeks4fIU1DA4yowQ==} + peerDependencies: + ajv: ^8.0.0 + peerDependenciesMeta: + ajv: + optional: true + + ajv@8.18.0: + resolution: {integrity: sha512-PlXPeEWMXMZ7sPYOHqmDyCJzcfNrUr3fGNKtezX14ykXOEIvyK81d+qydx89KY5O71FKMPaQ2vBfBFI5NHR63A==} + + ansi-regex@5.0.1: + resolution: {integrity: sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==} + engines: {node: '>=8'} + + ansi-regex@6.2.2: + resolution: {integrity: sha512-Bq3SmSpyFHaWjPk8If9yc6svM8c56dB5BAtW4Qbw5jHTwwXXcTLoRMkpDJp6VL0XzlWaCHTXrkFURMYmD0sLqg==} + engines: {node: '>=12'} + + ansi-styles@4.3.0: + resolution: {integrity: sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==} + engines: {node: '>=8'} + + argparse@2.0.1: + resolution: {integrity: sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==} + + arrify@2.0.1: + resolution: {integrity: sha512-3duEwti880xqi4eAMN8AyR4a0ByT90zoYdLlevfrvU43vb0YZwZVfxOgxWrLXXXpyugL0hNZc9G6BiB5B3nUug==} + engines: {node: '>=8'} + + assertion-error@2.0.1: + resolution: {integrity: sha512-Izi8RQcffqCeNVgFigKli1ssklIbpHnCYc6AknXGYoB6grJqyeby7jv12JUQgmTAnIDnbck1uxksT4dzN3PWBA==} + engines: {node: '>=12'} + + asynckit@0.4.0: + resolution: {integrity: sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q==} + + axios@1.13.6: + resolution: {integrity: sha512-ChTCHMouEe2kn713WHbQGcuYrr6fXTBiu460OTwWrWob16g1bXn4vtz07Ope7ewMozJAnEquLk5lWQWtBig9DQ==} + + balanced-match@4.0.4: + resolution: {integrity: sha512-BLrgEcRTwX2o6gGxGOCNyMvGSp35YofuYzw9h1IMTRmKqttAZZVU67bdb9Pr2vUHA8+j3i2tJfjO6C6+4myGTA==} + engines: {node: 18 || 20 || 
>=22} + + base64-js@1.5.1: + resolution: {integrity: sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA==} + + bignumber.js@9.3.1: + resolution: {integrity: sha512-Ko0uX15oIUS7wJ3Rb30Fs6SkVbLmPBAKdlm7q9+ak9bbIeFf0MwuBsQV6z7+X768/cHsfg+WlysDWJcmthjsjQ==} + + bl@4.1.0: + resolution: {integrity: sha512-1W07cM9gS6DcLperZfFSj+bWLtaPGSOHWhPiGzXmvVJbRLdG82sH/Kn8EtW1VqWVA54AKf2h5k5BbnIbwF3h6w==} + + body-parser@2.2.2: + resolution: {integrity: sha512-oP5VkATKlNwcgvxi0vM0p/D3n2C3EReYVX+DNYs5TjZFn/oQt2j+4sVJtSMr18pdRr8wjTcBl6LoV+FUwzPmNA==} + engines: {node: '>=18'} + + brace-expansion@5.0.4: + resolution: {integrity: sha512-h+DEnpVvxmfVefa4jFbCf5HdH5YMDXRsmKflpf1pILZWRFlTbJpxeU55nJl4Smt5HQaGzg1o6RHFPJaOqnmBDg==} + engines: {node: 18 || 20 || >=22} + + buffer-crc32@0.2.13: + resolution: {integrity: sha512-VO9Ht/+p3SN7SKWqcrgEzjGbRSJYTx+Q1pTQC0wrWqHx0vpJraQ6GtHx8tvcg1rlK1byhU5gccxgOgj7B0TDkQ==} + + buffer-equal-constant-time@1.0.1: + resolution: {integrity: sha512-zRpUiDwd/xk6ADqPMATG8vc9VPrkck7T07OIx0gnjmJAnHnTVXNQG3vfvWNuiZIkwu9KrKdA1iJKfsfTVxE6NA==} + + buffer@5.7.1: + resolution: {integrity: sha512-EHcyIPBQ4BSGlvjB16k5KgAJ27CIsHY/2JBmCRReo48y9rQ3MaUzWX3KVlBa4U7MyX02HdVj0K7C3WaB3ju7FQ==} + + bundle-name@4.1.0: + resolution: {integrity: sha512-tjwM5exMg6BGRI+kNmTntNsvdZS1X8BFYS6tnJ2hdH0kVxM6/eVZ2xy+FqStSWvYmtfFMDLIxurorHwDKfDz5Q==} + engines: {node: '>=18'} + + byte-counter@0.1.0: + resolution: {integrity: sha512-jheRLVMeUKrDBjVw2O5+k4EvR4t9wtxHL+bo/LxfkxsVeuGMy3a5SEGgXdAFA4FSzTrU8rQXQIrsZ3oBq5a0pQ==} + engines: {node: '>=20'} + + bytes@3.1.2: + resolution: {integrity: sha512-/Nf7TyzTx6S3yRJObOAV7956r8cr2+Oj8AC5dt8wSP3BQAoeX58NoHyCU8P8zGkNXStjTSi6fzO6F0pBdcYbEg==} + engines: {node: '>= 0.8'} + + cac@6.7.14: + resolution: {integrity: sha512-b6Ilus+c3RrdDk+JhLKUAQfzzgLEPy6wcXqS7f/xe1EETvsDP6GORG7SFuOs6cID5YkqchW/LXZbX5bc8j7ZcQ==} + engines: {node: '>=8'} + + cacheable-lookup@7.0.0: + resolution: {integrity: 
sha512-+qJyx4xiKra8mZrcwhjMRMUhD5NR1R8esPkzIYxX96JiecFoxAXFuz/GpR3+ev4PE1WamHip78wV0vcmPQtp8w==} + engines: {node: '>=14.16'} + + cacheable-request@13.0.18: + resolution: {integrity: sha512-rFWadDRKJs3s2eYdXlGggnBZKG7MTblkFBB0YllFds+UYnfogDp2wcR6JN97FhRkHTvq59n2vhNoHNZn29dh/Q==} + engines: {node: '>=18'} + + call-bind-apply-helpers@1.0.2: + resolution: {integrity: sha512-Sp1ablJ0ivDkSzjcaJdxEunN5/XvksFJ2sMBFfq6x0ryhQV/2b/KwFe21cMpmHtPOSij8K99/wSfoEuTObmuMQ==} + engines: {node: '>= 0.4'} + + call-bound@1.0.4: + resolution: {integrity: sha512-+ys997U96po4Kx/ABpBCqhA9EuxJaQWDQg7295H4hBphv3IZg0boBKuwYpt4YXp6MZ5AmZQnU/tyMTlRpaSejg==} + engines: {node: '>= 0.4'} + + chai@5.3.3: + resolution: {integrity: sha512-4zNhdJD/iOjSH0A05ea+Ke6MU5mmpQcbQsSOkgdaUMJ9zTlDTD/GYlwohmIE2u0gaxHYiVHEn1Fw9mZ/ktJWgw==} + engines: {node: '>=18'} + + chardet@2.1.1: + resolution: {integrity: sha512-PsezH1rqdV9VvyNhxxOW32/d75r01NY7TQCmOqomRo15ZSOKbpTFVsfjghxo6JloQUCGnH4k1LGu0R4yCLlWQQ==} + + check-error@2.1.3: + resolution: {integrity: sha512-PAJdDJusoxnwm1VwW07VWwUN1sl7smmC3OKggvndJFadxxDRyFJBX/ggnu/KE4kQAB7a3Dp8f/YXC1FlUprWmA==} + engines: {node: '>= 16'} + + chownr@1.1.4: + resolution: {integrity: sha512-jJ0bqzaylmJtVnNgzTeSOs8DPavpbYgEr/b0YL8/2GO3xJEhInFmhKMUnEJQjZumK7KXGFhUy89PrsJWlakBVg==} + + cjs-module-lexer@1.4.3: + resolution: {integrity: sha512-9z8TZaGM1pfswYeXrUpzPrkx8UnWYdhJclsiYMm6x/w5+nN+8Tf/LnAgfLGQCm59qAOxU8WwHEq2vNwF6i4j+Q==} + + cjs-module-lexer@2.2.0: + resolution: {integrity: sha512-4bHTS2YuzUvtoLjdy+98ykbNB5jS0+07EvFNXerqZQJ89F7DI6ET7OQo/HJuW6K0aVsKA9hj9/RVb2kQVOrPDQ==} + + cliui@8.0.1: + resolution: {integrity: sha512-BSeNnyus75C4//NQ9gQt1/csTXyo/8Sb+afLAkzAptFuMsod9HFokGNudZpi/oQV73hnVK+sR+5PVRMd+Dr7YQ==} + engines: {node: '>=12'} + + color-convert@2.0.1: + resolution: {integrity: sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==} + engines: {node: '>=7.0.0'} + + color-name@1.1.4: + resolution: {integrity: 
sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==} + + combined-stream@1.0.8: + resolution: {integrity: sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==} + engines: {node: '>= 0.8'} + + commander@10.0.1: + resolution: {integrity: sha512-y4Mg2tXshplEbSGzx7amzPwKKOCGuoSRP/CjEdwwk0FOGlUbq6lKuoyDZTNZkmxHdJtp54hdfY/JUrdL7Xfdug==} + engines: {node: '>=14'} + + content-disposition@1.0.1: + resolution: {integrity: sha512-oIXISMynqSqm241k6kcQ5UwttDILMK4BiurCfGEREw6+X9jkkpEe5T9FZaApyLGGOnFuyMWZpdolTXMtvEJ08Q==} + engines: {node: '>=18'} + + content-type@1.0.5: + resolution: {integrity: sha512-nTjqfcBFEipKdXCv4YDQWCfmcLZKm81ldF0pAopTvyrFGVbcR6P/VAAd5G7N+0tTr8QqiU0tFadD6FK4NtJwOA==} + engines: {node: '>= 0.6'} + + cookie-signature@1.2.2: + resolution: {integrity: sha512-D76uU73ulSXrD1UXF4KE2TMxVVwhsnCgfAyTg9k8P6KGZjlXKrOLe4dJQKI3Bxi5wjesZoFXJWElNWBjPZMbhg==} + engines: {node: '>=6.6.0'} + + cookie@0.7.2: + resolution: {integrity: sha512-yki5XnKuf750l50uGTllt6kKILY4nQ1eNIQatoXEByZ5dWgnKqbnqmTrBE5B4N7lrMJKQ2ytWMiTO2o0v6Ew/w==} + engines: {node: '>= 0.6'} + + cors@2.8.6: + resolution: {integrity: sha512-tJtZBBHA6vjIAaF6EnIaq6laBBP9aq/Y3ouVJjEfoHbRBcHBAHYcMh/w8LDrk2PvIMMq8gmopa5D4V8RmbrxGw==} + engines: {node: '>= 0.10'} + + cross-spawn@7.0.6: + resolution: {integrity: sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA==} + engines: {node: '>= 8'} + + data-uri-to-buffer@4.0.1: + resolution: {integrity: sha512-0R9ikRb668HB7QDxT1vkpuUBtqc53YyAwMwGeUFKRojY/NWKvdZ+9UYtRfGmhqNbRkTSVpMbmyhXipFFv2cb/A==} + engines: {node: '>= 12'} + + debug@4.4.3: + resolution: {integrity: sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA==} + engines: {node: '>=6.0'} + peerDependencies: + supports-color: '*' + peerDependenciesMeta: + supports-color: + optional: true + + decompress-response@10.0.0: + resolution: {integrity: 
sha512-oj7KWToJuuxlPr7VV0vabvxEIiqNMo+q0NueIiL3XhtwC6FVOX7Hr1c0C4eD0bmf7Zr+S/dSf2xvkH3Ad6sU3Q==} + engines: {node: '>=20'} + + decompress-response@6.0.0: + resolution: {integrity: sha512-aW35yZM6Bb/4oJlZncMH2LCoZtJXTRxES17vE3hoRiowU2kWHaJKFkSBDnDR+cm9J+9QhXmREyIfv0pji9ejCQ==} + engines: {node: '>=10'} + + deep-eql@5.0.2: + resolution: {integrity: sha512-h5k/5U50IJJFpzfL6nO9jaaumfjO/f2NjK/oYB2Djzm4p9L+3T9qWpZqZ2hAbLPuuYq9wrU08WQyBTL5GbPk5Q==} + engines: {node: '>=6'} + + deep-extend@0.6.0: + resolution: {integrity: sha512-LOHxIOaPYdHlJRtCQfDIVZtfw/ufM8+rVj649RIHzcm/vGwQRXFt6OPqIFWsm2XEMrNIEtWR64sY1LEKD2vAOA==} + engines: {node: '>=4.0.0'} + + deepmerge@4.3.1: + resolution: {integrity: sha512-3sUqbMEc77XqpdNO7FRyRog+eW3ph+GYCbj+rK+uYyRMuwsVy0rMiVtPn+QJlKFvWP/1PYpapqYn0Me2knFn+A==} + engines: {node: '>=0.10.0'} + + default-browser-id@5.0.1: + resolution: {integrity: sha512-x1VCxdX4t+8wVfd1so/9w+vQ4vx7lKd2Qp5tDRutErwmR85OgmfX7RlLRMWafRMY7hbEiXIbudNrjOAPa/hL8Q==} + engines: {node: '>=18'} + + default-browser@5.5.0: + resolution: {integrity: sha512-H9LMLr5zwIbSxrmvikGuI/5KGhZ8E2zH3stkMgM5LpOWDutGM2JZaj460Udnf1a+946zc7YBgrqEWwbk7zHvGw==} + engines: {node: '>=18'} + + define-lazy-prop@3.0.0: + resolution: {integrity: sha512-N+MeXYoqr3pOgn8xfyRPREN7gHakLYjhsHhWGT3fWAiL4IkAt0iDw14QiiEm2bE30c5XX5q0FtAA3CK5f9/BUg==} + engines: {node: '>=12'} + + delayed-stream@1.0.0: + resolution: {integrity: sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ==} + engines: {node: '>=0.4.0'} + + depd@2.0.0: + resolution: {integrity: sha512-g7nH6P6dyDioJogAAGprGpCtVImJhpPk/roCzdb3fIh61/s/nPsfR6onyMwkCAR/OlC3yBC0lESvUoQEAssIrw==} + engines: {node: '>= 0.8'} + + detect-libc@2.1.2: + resolution: {integrity: sha512-Btj2BOOO83o3WyH59e8MgXsxEQVcarkUOpEYrubB0urwnN10yQ364rsiByU11nZlqWYZm05i/of7io4mzihBtQ==} + engines: {node: '>=8'} + + diff@7.0.0: + resolution: {integrity: 
sha512-PJWHUb1RFevKCwaFA9RlG5tCd+FO5iRh9A8HEtkmBH2Li03iJriB6m6JIN4rGz3K3JLawI7/veA1xzRKP6ISBw==} + engines: {node: '>=0.3.1'} + + diff@8.0.3: + resolution: {integrity: sha512-qejHi7bcSD4hQAZE0tNAawRK1ZtafHDmMTMkrrIGgSLl7hTnQHmKCeB45xAcbfTqK2zowkM3j3bHt/4b/ARbYQ==} + engines: {node: '>=0.3.1'} + + dom-serializer@2.0.0: + resolution: {integrity: sha512-wIkAryiqt/nV5EQKqQpo3SToSOV9J0DnbJqwK7Wv/Trc92zIAYZ4FlMu+JPFW1DfGFt81ZTCGgDEabffXeLyJg==} + + domelementtype@2.3.0: + resolution: {integrity: sha512-OLETBj6w0OsagBwdXnPdN0cnMfF9opN69co+7ZrbfPGrdpPVNBUj02spi6B1N7wChLQiPn4CSH/zJvXw56gmHw==} + + domhandler@5.0.3: + resolution: {integrity: sha512-cgwlv/1iFQiFnU96XXgROh8xTeetsnJiDsTc7TYCLFd9+/WNkIqPTxiM/8pSd8VIrhXGTf1Ny1q1hquVqDJB5w==} + engines: {node: '>= 4'} + + domutils@3.2.2: + resolution: {integrity: sha512-6kZKyUajlDuqlHKVX1w7gyslj9MPIXzIFiz/rGu35uC1wMi+kMhQwGhl4lt9unC9Vb9INnY9Z3/ZA3+FhASLaw==} + + dot-prop@6.0.1: + resolution: {integrity: sha512-tE7ztYzXHIeyvc7N+hR3oi7FIbf/NIjVP9hmAt3yMXzrQ072/fpjGLx2GxNxGxUl5V73MEqYzioOMoVhGMJ5cA==} + engines: {node: '>=10'} + + dotenv-expand@12.0.3: + resolution: {integrity: sha512-uc47g4b+4k/M/SeaW1y4OApx+mtLWl92l5LMPP0GNXctZqELk+YGgOPIIC5elYmUH4OuoK3JLhuRUYegeySiFA==} + engines: {node: '>=12'} + + dotenv@16.6.1: + resolution: {integrity: sha512-uBq4egWHTcTt33a72vpSG0z3HnPuIl6NqYcTrKEg2azoEyl2hpW0zqlxysq2pK9HlDIHyHyakeYaYnSAwd8bow==} + engines: {node: '>=12'} + + dotenv@17.3.1: + resolution: {integrity: sha512-IO8C/dzEb6O3F9/twg6ZLXz164a2fhTnEWb95H23Dm4OuN+92NmEAlTrupP9VW6Jm3sO26tQlqyvyi4CsnY9GA==} + engines: {node: '>=12'} + + dunder-proto@1.0.1: + resolution: {integrity: sha512-KIN/nDJBQRcXw0MLVhZE9iQHmG68qAVIBg9CqmUYjmQIhgij9U5MFvrqkUL5FbtyyzZuOeOt0zdeRe4UY7ct+A==} + engines: {node: '>= 0.4'} + + duplexify@4.1.3: + resolution: {integrity: sha512-M3BmBhwJRZsSx38lZyhE53Csddgzl5R7xGJNk7CVddZD6CcmwMCH8J+7AprIrQKH7TonKxaCjcv27Qmf+sQ+oA==} + + ecdsa-sig-formatter@1.0.11: + resolution: {integrity: 
sha512-nagl3RYrbNv6kQkeJIpt6NJZy8twLB/2vtz6yN9Z4vRKHN4/QZJIEbqohALSgwKdnksuY3k5Addp5lg8sVoVcQ==} + + ee-first@1.1.1: + resolution: {integrity: sha512-WMwm9LhRUo+WUaRN+vRuETqG89IgZphVSNkdFgeb6sS/E4OrDIN7t48CAewSHXc6C8lefD8KKfr5vY61brQlow==} + + emoji-regex@8.0.0: + resolution: {integrity: sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==} + + encodeurl@2.0.0: + resolution: {integrity: sha512-Q0n9HRi4m6JuGIV1eFlmvJB7ZEVxu93IrMyiMsGC0lrMJMWzRgx6WGquyfQgZVb31vhGgXnfmPNNXmxnOkRBrg==} + engines: {node: '>= 0.8'} + + end-of-stream@1.4.5: + resolution: {integrity: sha512-ooEGc6HP26xXq/N+GCGOT0JKCLDGrq2bQUZrQ7gyrJiZANJ/8YDTxTpQBXGMn+WbIQXNVpyWymm7KYVICQnyOg==} + + entities@4.5.0: + resolution: {integrity: sha512-V0hjH4dGPh9Ao5p0MoRY6BVqtwCjhz6vI5LT8AJ55H+4g9/4vbHx1I54fS0XuclLhDHArPQCiMjDxjaL8fPxhw==} + engines: {node: '>=0.12'} + + es-define-property@1.0.1: + resolution: {integrity: sha512-e3nRfgfUZ4rNGL232gUgX06QNyyez04KdjFrF+LTRoOXmrOgFKDg4BCdsjW8EnT69eqdYGmRpJwiPVYNrCaW3g==} + engines: {node: '>= 0.4'} + + es-errors@1.3.0: + resolution: {integrity: sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==} + engines: {node: '>= 0.4'} + + es-module-lexer@1.7.0: + resolution: {integrity: sha512-jEQoCwk8hyb2AZziIOLhDqpm5+2ww5uIE6lkO/6jcOCusfk6LhMHpXXfBLXTZ7Ydyt0j4VoUQv6uGNYbdW+kBA==} + + es-object-atoms@1.1.1: + resolution: {integrity: sha512-FGgH2h8zKNim9ljj7dankFPcICIK9Cp5bm+c2gQSYePhpaG5+esrLODihIorn+Pe6FGJzWhXQotPv73jTaldXA==} + engines: {node: '>= 0.4'} + + es-set-tostringtag@2.1.0: + resolution: {integrity: sha512-j6vWzfrGVfyXxge+O0x5sh6cvxAog0a/4Rdd2K36zCMV5eJ+/+tOAngRO8cODMNWbVRdVlmGZQL2YS3yR8bIUA==} + engines: {node: '>= 0.4'} + + esbuild@0.27.3: + resolution: {integrity: sha512-8VwMnyGCONIs6cWue2IdpHxHnAjzxnw2Zr7MkVxB2vjmQ2ivqGFb4LEG3SMnv0Gb2F/G/2yA8zUaiL1gywDCCg==} + engines: {node: '>=18'} + hasBin: true + + escalade@3.2.0: + resolution: {integrity: 
sha512-WUj2qlxaQtO4g6Pq5c29GTcWGDyd8itL8zTlipgECz3JesAiiOKotd8JU6otB3PACgG6xkJUyVhboMS+bje/jA==} + engines: {node: '>=6'} + + escape-html@1.0.3: + resolution: {integrity: sha512-NiSupZ4OeuGwr68lGIeym/ksIZMJodUGOSCZ/FSnTxcrekbvqrgdUxlJOMpijaKZVjAJrWrGs/6Jy8OMuyj9ow==} + + estree-walker@3.0.3: + resolution: {integrity: sha512-7RUKfXgSMMkzt6ZuXmqapOurLGPPfgj6l9uRZ7lRGolvk0y2yocc35LdcxKC5PQZdn2DMqioAQ2NoWcrTKmm6g==} + + etag@1.8.1: + resolution: {integrity: sha512-aIL5Fx7mawVa300al2BnEE4iNvo1qETxLrPI/o05L7z6go7fCw1J6EQmbK4FmJ2AS7kgVF/KEZWufBfdClMcPg==} + engines: {node: '>= 0.6'} + + event-target-shim@5.0.1: + resolution: {integrity: sha512-i/2XbnSz/uxRCU6+NdVJgKWDTM427+MqYbkQzD321DuCQJUqOuJKIA0IM2+W2xtYHdKOmZ4dR6fExsd4SXL+WQ==} + engines: {node: '>=6'} + + eventemitter3@4.0.7: + resolution: {integrity: sha512-8guHBZCwKnFhYdHr2ysuRWErTwhoN2X8XELRlrRwpmfeY2jjuUN4taQMsULKUVo1K4DvZl+0pgfyoysHxvmvEw==} + + eventemitter3@5.0.4: + resolution: {integrity: sha512-mlsTRyGaPBjPedk6Bvw+aqbsXDtoAyAzm5MO7JgU+yVRyMQ5O8bD4Kcci7BS85f93veegeCPkL8R4GLClnjLFw==} + + eventid@2.0.1: + resolution: {integrity: sha512-sPNTqiMokAvV048P2c9+foqVJzk49o6d4e0D/sq5jog3pw+4kBgyR0gaM1FM7Mx6Kzd9dztesh9oYz1LWWOpzw==} + engines: {node: '>=10'} + + eventsource-parser@3.0.6: + resolution: {integrity: sha512-Vo1ab+QXPzZ4tCa8SwIHJFaSzy4R6SHf7BY79rFBDf0idraZWAkYrDjDj8uWaSm3S2TK+hJ7/t1CEmZ7jXw+pg==} + engines: {node: '>=18.0.0'} + + eventsource@3.0.7: + resolution: {integrity: sha512-CRT1WTyuQoD771GW56XEZFQ/ZoSfWid1alKGDYMmkt2yl8UXrVR4pspqWNEcqKvVIzg6PAltWjxcSSPrboA4iA==} + engines: {node: '>=18.0.0'} + + execa@9.6.1: + resolution: {integrity: sha512-9Be3ZoN4LmYR90tUoVu2te2BsbzHfhJyfEiAVfz7N5/zv+jduIfLrV2xdQXOHbaD6KgpGdO9PRPM1Y4Q9QkPkA==} + engines: {node: ^18.19.0 || >=20.5.0} + + expand-template@2.0.3: + resolution: {integrity: sha512-XYfuKMvj4O35f/pOXLObndIRvyQ+/+6AhODh+OKWj9S9498pHHn/IMszH+gt0fBCRWMNfk1ZSp5x3AifmnI2vg==} + engines: {node: '>=6'} + + expect-type@1.3.0: + resolution: {integrity: 
sha512-knvyeauYhqjOYvQ66MznSMs83wmHrCycNEN6Ao+2AeYEfxUIkuiVxdEa1qlGEPK+We3n0THiDciYSsCcgW/DoA==} + engines: {node: '>=12.0.0'} + + express-rate-limit@8.3.1: + resolution: {integrity: sha512-D1dKN+cmyPWuvB+G2SREQDzPY1agpBIcTa9sJxOPMCNeH3gwzhqJRDWCXW3gg0y//+LQ/8j52JbMROWyrKdMdw==} + engines: {node: '>= 16'} + peerDependencies: + express: '>= 4.11' + + express@5.2.1: + resolution: {integrity: sha512-hIS4idWWai69NezIdRt2xFVofaF4j+6INOpJlVOLDO8zXGpUVEVzIYk12UUi2JzjEzWL3IOAxcTubgz9Po0yXw==} + engines: {node: '>= 18'} + + extend@3.0.2: + resolution: {integrity: sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g==} + + extract-zip@2.0.1: + resolution: {integrity: sha512-GDhU9ntwuKyGXdZBUgTIe+vXnWj0fppUEtMDL0+idd5Sta8TGpHssn/eusA9mrPr9qNDym6SxAYZjNvCn/9RBg==} + engines: {node: '>= 10.17.0'} + hasBin: true + + fast-deep-equal@3.1.3: + resolution: {integrity: sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==} + + fast-levenshtein@2.0.6: + resolution: {integrity: sha512-DCXu6Ifhqcks7TZKY3Hxp3y6qphY5SJZmrWMDrKcERSOXWQdMhU9Ig/PYrzyw/ul9jOIyh0N4M0tbC5hodg8dw==} + + fast-uri@3.1.0: + resolution: {integrity: sha512-iPeeDKJSWf4IEOasVVrknXpaBV0IApz/gp7S2bb7Z4Lljbl2MGJRqInZiUrQwV16cpzw/D3S5j5Julj/gT52AA==} + + fd-slicer@1.1.0: + resolution: {integrity: sha512-cE1qsB/VwyQozZ+q1dGxR8LBYNZeofhEdUNGSMbQD3Gw2lAzX9Zb3uIU6Ebc/Fmyjo9AWWfnn0AUCHqtevs/8g==} + + fdir@6.5.0: + resolution: {integrity: sha512-tIbYtZbucOs0BRGqPJkshJUYdL+SDH7dVM8gjy+ERp3WAUjLEFJE+02kanyHtwjWOnwrKYBiwAmM0p4kLJAnXg==} + engines: {node: '>=12.0.0'} + peerDependencies: + picomatch: ^3 || ^4 + peerDependenciesMeta: + picomatch: + optional: true + + fetch-blob@3.2.0: + resolution: {integrity: sha512-7yAQpD2UMJzLi1Dqv7qFYnPbaPx7ZfFK6PiIxQ4PfkGPyNyl2Ugx+a/umUonmKqjhM4DnfbMvdX6otXq83soQQ==} + engines: {node: ^12.20 || >= 14.13} + + figures@6.1.0: + resolution: {integrity: 
sha512-d+l3qxjSesT4V7v2fh+QnmFnUWv9lSpjarhShNTgBOfA0ttejbQUAlHLitbjkoRiDulW0OPoQPYIGhIC8ohejg==} + engines: {node: '>=18'} + + finalhandler@2.1.1: + resolution: {integrity: sha512-S8KoZgRZN+a5rNwqTxlZZePjT/4cnm0ROV70LedRHZ0p8u9fRID0hJUZQpkKLzro8LfmC8sx23bY6tVNxv8pQA==} + engines: {node: '>= 18.0.0'} + + find-up-simple@1.0.1: + resolution: {integrity: sha512-afd4O7zpqHeRyg4PfDQsXmlDe2PfdHtJt6Akt8jOWaApLOZk5JXs6VMR29lz03pRe9mpykrRCYIYxaJYcfpncQ==} + engines: {node: '>=18'} + + follow-redirects@1.15.11: + resolution: {integrity: sha512-deG2P0JfjrTxl50XGCDyfI97ZGVCxIpfKYmfyrQ54n5FO/0gfIES8C/Psl6kWVDolizcaaxZJnTS0QSMxvnsBQ==} + engines: {node: '>=4.0'} + peerDependencies: + debug: '*' + peerDependenciesMeta: + debug: + optional: true + + foreground-child@3.3.1: + resolution: {integrity: sha512-gIXjKqtFuWEgzFRJA9WCQeSJLZDjgJUOMCMzxtvFq/37KojM1BFGufqsCy0r4qSQmYLsZYMeyRqzIWOMup03sw==} + engines: {node: '>=14'} + + form-data-encoder@4.1.0: + resolution: {integrity: sha512-G6NsmEW15s0Uw9XnCg+33H3ViYRyiM0hMrMhhqQOR8NFc5GhYrI+6I3u7OTw7b91J2g8rtvMBZJDbcGb2YUniw==} + engines: {node: '>= 18'} + + form-data@2.5.5: + resolution: {integrity: sha512-jqdObeR2rxZZbPSGL+3VckHMYtu+f9//KXBsVny6JSX/pa38Fy+bGjuG8eW/H6USNQWhLi8Num++cU2yOCNz4A==} + engines: {node: '>= 0.12'} + + form-data@4.0.5: + resolution: {integrity: sha512-8RipRLol37bNs2bhoV67fiTEvdTrbMUYcFTiy3+wuuOnUog2QBHCZWXDRijWQfAkhBj2Uf5UnVaiWwA5vdd82w==} + engines: {node: '>= 6'} + + formdata-polyfill@4.0.10: + resolution: {integrity: sha512-buewHzMvYL29jdeQTVILecSaZKnt/RJWjoZCF5OW60Z67/GmSLBkOFM7qh1PI3zFNtJbaZL5eQu1vLfazOwj4g==} + engines: {node: '>=12.20.0'} + + forwarded-parse@2.1.2: + resolution: {integrity: sha512-alTFZZQDKMporBH77856pXgzhEzaUVmLCDk+egLgIgHst3Tpndzz8MnKe+GzRJRfvVdn69HhpW7cmXzvtLvJAw==} + + forwarded@0.2.0: + resolution: {integrity: sha512-buRG0fpBtRHSTCOASe6hD258tEubFoRLb4ZNA6NxMVHNw2gOcwHo9wyablzMzOA5z9xA9L1KNjk/Nt6MT9aYow==} + engines: {node: '>= 0.6'} + + fresh@2.0.0: + resolution: {integrity: 
sha512-Rx/WycZ60HOaqLKAi6cHRKKI7zxWbJ31MhntmtwMoaTeF7XFH9hhBp8vITaMidfljRQ6eYWCKkaTK+ykVJHP2A==} + engines: {node: '>= 0.8'} + + fs-constants@1.0.0: + resolution: {integrity: sha512-y6OAwoSIf7FyjMIv94u+b5rdheZEjzR63GTyZJm5qh4Bi+2YgwLCcI/fPFZkL5PSixOt6ZNKm+w+Hfp/Bciwow==} + + fs-extra@11.3.4: + resolution: {integrity: sha512-CTXd6rk/M3/ULNQj8FBqBWHYBVYybQ3VPBw0xGKFe3tuH7ytT6ACnvzpIQ3UZtB8yvUKC2cXn1a+x+5EVQLovA==} + engines: {node: '>=14.14'} + + fsevents@2.3.3: + resolution: {integrity: sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==} + engines: {node: ^8.16.0 || ^10.6.0 || >=11.0.0} + os: [darwin] + + function-bind@1.1.2: + resolution: {integrity: sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==} + + fzf@0.5.2: + resolution: {integrity: sha512-Tt4kuxLXFKHy8KT40zwsUPUkg1CrsgY25FxA2U/j/0WgEDCk3ddc/zLTCCcbSHX9FcKtLuVaDGtGE/STWC+j3Q==} + + gaxios@6.7.1: + resolution: {integrity: sha512-LDODD4TMYx7XXdpwxAVRAIAuB0bzv0s+ywFonY46k126qzQHT9ygyoa9tncmOiQmmDrik65UYsEkv3lbfqQ3yQ==} + engines: {node: '>=14'} + + gaxios@7.1.4: + resolution: {integrity: sha512-bTIgTsM2bWn3XklZISBTQX7ZSddGW+IO3bMdGaemHZ3tbqExMENHLx6kKZ/KlejgrMtj8q7wBItt51yegqalrA==} + engines: {node: '>=18'} + + gcp-metadata@6.1.1: + resolution: {integrity: sha512-a4tiq7E0/5fTjxPAaH4jpjkSv/uCaU2p5KC6HVGrvl0cDjA8iBZv4vv1gyzlmK0ZUKqwpOyQMKzZQe3lTit77A==} + engines: {node: '>=14'} + + gcp-metadata@8.1.2: + resolution: {integrity: sha512-zV/5HKTfCeKWnxG0Dmrw51hEWFGfcF2xiXqcA3+J90WDuP0SvoiSO5ORvcBsifmx/FoIjgQN3oNOGaQ5PhLFkg==} + engines: {node: '>=18'} + + get-caller-file@2.0.5: + resolution: {integrity: sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==} + engines: {node: 6.* || 8.* || >= 10.*} + + get-intrinsic@1.3.0: + resolution: {integrity: sha512-9fSjSaos/fRIVIp+xSJlE6lfwhES7LNtKaCBIamHsjr2na1BiABJPo0mOjjz8GJDURarmCPGqaiVg5mfjb98CQ==} + engines: {node: '>= 0.4'} + + 
get-proto@1.0.1: + resolution: {integrity: sha512-sTSfBjoXBp89JvIKIefqw7U2CCebsc74kiY6awiGogKtoSGbgjYE/G/+l9sF3MWFPNc9IcoOC4ODfKHfxFmp0g==} + engines: {node: '>= 0.4'} + + get-stream@5.2.0: + resolution: {integrity: sha512-nBF+F1rAZVCu/p7rjzgA+Yb4lfYXrpl7a6VmJrU8wF9I1CKvP/QwPNZHnOlwbTkY6dvtFIzFMSyQXbLoTQPRpA==} + engines: {node: '>=8'} + + get-stream@9.0.1: + resolution: {integrity: sha512-kVCxPF3vQM/N0B1PmoqVUqgHP+EeVjmZSQn+1oCRPxd2P21P2F19lIgbR3HBosbB1PUhOAoctJnfEn2GbN2eZA==} + engines: {node: '>=18'} + + github-from-package@0.0.0: + resolution: {integrity: sha512-SyHy3T1v2NUXn29OsWdxmK6RwHD+vkj3v8en8AOBZ1wBQ/hCAQ5bAQTD02kW4W9tUp/3Qh6J8r9EvntiyCmOOw==} + + glob@12.0.0: + resolution: {integrity: sha512-5Qcll1z7IKgHr5g485ePDdHcNQY0k2dtv/bjYy0iuyGxQw2qSOiiXUXJ+AYQpg3HNoUMHqAruX478Jeev7UULw==} + engines: {node: 20 || >=22} + hasBin: true + + google-auth-library@10.6.2: + resolution: {integrity: sha512-e27Z6EThmVNNvtYASwQxose/G57rkRuaRbQyxM2bvYLLX/GqWZ5chWq2EBoUchJbCc57eC9ArzO5wMsEmWftCw==} + engines: {node: '>=18'} + + google-auth-library@9.15.1: + resolution: {integrity: sha512-Jb6Z0+nvECVz+2lzSMt9u98UsoakXxA2HGHMCxh+so3n90XgYWkq5dur19JAJV7ONiJY22yBTyJB1TSkvPq9Ng==} + engines: {node: '>=14'} + + google-gax@4.6.1: + resolution: {integrity: sha512-V6eky/xz2mcKfAd1Ioxyd6nmA61gao3n01C+YeuIwu3vzM9EDR6wcVzMSIbLMDXWeoi9SHYctXuKYC5uJUT3eQ==} + engines: {node: '>=14'} + + google-logging-utils@0.0.2: + resolution: {integrity: sha512-NEgUnEcBiP5HrPzufUkBzJOD/Sxsco3rLNo1F1TNf7ieU8ryUzBhqba8r756CjLX7rn3fHl6iLEwPYuqpoKgQQ==} + engines: {node: '>=14'} + + google-logging-utils@1.1.3: + resolution: {integrity: sha512-eAmLkjDjAFCVXg7A1unxHsLf961m6y17QFqXqAXGj/gVkKFrEICfStRfwUlGNfeCEjNRa32JEWOUTlYXPyyKvA==} + engines: {node: '>=14'} + + googleapis-common@7.2.0: + resolution: {integrity: sha512-/fhDZEJZvOV3X5jmD+fKxMqma5q2Q9nZNSF3kn1F18tpxmA86BcTxAGBQdM0N89Z3bEaIs+HVznSmFJEAmMTjA==} + engines: {node: '>=14.0.0'} + + googleapis@137.1.0: + resolution: {integrity: 
sha512-2L7SzN0FLHyQtFmyIxrcXhgust77067pkkduqkbIpDuj9JzVnByxsRrcRfUMFQam3rQkWW2B0f1i40IwKDWIVQ==} + engines: {node: '>=14.0.0'} + + gopd@1.2.0: + resolution: {integrity: sha512-ZUKRh6/kUFoAiTAtTYPZJ3hw9wNxx+BIBOijnlG9PnrJsCcSjs1wyyD6vJpaYtgnzDrKYRSqf3OO6Rfa93xsRg==} + engines: {node: '>= 0.4'} + + got@14.6.6: + resolution: {integrity: sha512-QLV1qeYSo5l13mQzWgP/y0LbMr5Plr5fJilgAIwgnwseproEbtNym8xpLsDzeZ6MWXgNE6kdWGBjdh3zT/Qerg==} + engines: {node: '>=20'} + + graceful-fs@4.2.11: + resolution: {integrity: sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ==} + + graphql@16.13.1: + resolution: {integrity: sha512-gGgrVCoDKlIZ8fIqXBBb0pPKqDgki0Z/FSKNiQzSGj2uEYHr1tq5wmBegGwJx6QB5S5cM0khSBpi/JFHMCvsmQ==} + engines: {node: ^12.22.0 || ^14.16.0 || ^16.0.0 || >=17.0.0} + + gtoken@7.1.0: + resolution: {integrity: sha512-pCcEwRi+TKpMlxAQObHDQ56KawURgyAf6jtIY046fJ5tIv3zDe/LEIubckAO8fj6JnAxLdmWkUfNyulQ2iKdEw==} + engines: {node: '>=14.0.0'} + + has-symbols@1.1.0: + resolution: {integrity: sha512-1cDNdwJ2Jaohmb3sg4OmKaMBwuC48sYni5HUw2DvsC8LjGTLK9h+eb1X6RyuOHe4hT0ULCW68iomhjUoKUqlPQ==} + engines: {node: '>= 0.4'} + + has-tostringtag@1.0.2: + resolution: {integrity: sha512-NqADB8VjPFLM2V0VvHUewwwsw0ZWBaIdgo+ieHtK3hasLz4qeCRjYcqfB6AQrBggRKppKF8L52/VqdVsO47Dlw==} + engines: {node: '>= 0.4'} + + hasown@2.0.2: + resolution: {integrity: sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==} + engines: {node: '>= 0.4'} + + hono@4.12.8: + resolution: {integrity: sha512-VJCEvtrezO1IAR+kqEYnxUOoStaQPGrCmX3j4wDTNOcD1uRPFpGlwQUIW8niPuvHXaTUxeOUl5MMDGrl+tmO9A==} + engines: {node: '>=16.9.0'} + + hosted-git-info@7.0.2: + resolution: {integrity: sha512-puUZAUKT5m8Zzvs72XWy3HtvVbTWljRE66cP60bxJzAqf2DgICo7lYTY2IHUmLnNpjYvw5bvmoHvPc0QO2a62w==} + engines: {node: ^16.14.0 || >=18.0.0} + + html-entities@2.6.0: + resolution: {integrity: 
sha512-kig+rMn/QOVRvr7c86gQ8lWXq+Hkv6CbAH1hLu+RG338StTpE8Z0b44SDVaqVu7HGKf27frdmUYEs9hTUX/cLQ==} + + html-to-text@9.0.5: + resolution: {integrity: sha512-qY60FjREgVZL03vJU6IfMV4GDjGBIoOyvuFdpBDIX9yTlDw0TjxVBQp+P8NvpdIXNJvfWBTNul7fsAQJq2FNpg==} + engines: {node: '>=14'} + + htmlparser2@8.0.2: + resolution: {integrity: sha512-GYdjWKDkbRLkZ5geuHs5NY1puJ+PXwP7+fHPRz06Eirsb9ugf6d8kkXav6ADhcODhFFPMIXyxkxSuMf3D6NCFA==} + + http-cache-semantics@4.2.0: + resolution: {integrity: sha512-dTxcvPXqPvXBQpq5dUr6mEMJX4oIEFv6bwom3FDwKRDsuIjjJGANqhBuoAn9c1RQJIdAKav33ED65E2ys+87QQ==} + + http-errors@2.0.1: + resolution: {integrity: sha512-4FbRdAX+bSdmo4AUFuS0WNiPz8NgFt+r8ThgNWmlrjQjt1Q7ZR9+zTlce2859x4KSXrwIsaeTqDoKQmtP8pLmQ==} + engines: {node: '>= 0.8'} + + http-proxy-agent@5.0.0: + resolution: {integrity: sha512-n2hY8YdoRE1i7r6M0w9DIw5GgZN0G25P8zLCRQ8rjXtTU3vsNFBI/vWK/UIeE6g5MUUz6avwAPXmL6Fy9D/90w==} + engines: {node: '>= 6'} + + http2-wrapper@2.2.1: + resolution: {integrity: sha512-V5nVw1PAOgfI3Lmeaj2Exmeg7fenjhRUgz1lPSezy1CuhPYbgQtbQj4jZfEAEMlaL+vupsvhjqCyjzob0yxsmQ==} + engines: {node: '>=10.19.0'} + + https-proxy-agent@5.0.1: + resolution: {integrity: sha512-dFcAjpTQFgoLMzC2VwU+C/CbS7uRL0lWmxDITmqm7C+7F0Odmj6s9l6alZc6AELXhrnggM2CeWSXHGOdX2YtwA==} + engines: {node: '>= 6'} + + https-proxy-agent@7.0.6: + resolution: {integrity: sha512-vK9P5/iUfdl95AI+JVyUuIcVtd4ofvtrOr3HNtM2yxC9bnMbEdp3x01OhQNnjb8IJYi38VlTE3mBXwcfvywuSw==} + engines: {node: '>= 14'} + + human-signals@8.0.1: + resolution: {integrity: sha512-eKCa6bwnJhvxj14kZk5NCPc6Hb6BdsU9DZcOnmQKSnO1VKrfV0zCvtttPZUsBvjmNDn8rpcJfpwSYnHBjc95MQ==} + engines: {node: '>=18.18.0'} + + iconv-lite@0.7.2: + resolution: {integrity: sha512-im9DjEDQ55s9fL4EYzOAv0yMqmMBSZp6G0VvFyTMPKWxiSBHUj9NW/qqLmXUwXrrM7AvqSlTCfvqRb0cM8yYqw==} + engines: {node: '>=0.10.0'} + + ieee754@1.2.1: + resolution: {integrity: sha512-dcyqhDvX1C46lXZcVqCpK+FtMRQVdIMN6/Df5js2zouUsqG7I6sFxitIC+7KYK29KdXOLHdu9zL4sFnoVQnqaA==} + + ignore@7.0.5: + resolution: {integrity: 
sha512-Hs59xBNfUIunMFgWAbGX5cq6893IbWg4KnrjbYwX3tx0ztorVgTDA6B2sxf8ejHJ4wz8BqGUMYlnzNBer5NvGg==} + engines: {node: '>= 4'} + + import-in-the-middle@1.15.0: + resolution: {integrity: sha512-bpQy+CrsRmYmoPMAE/0G33iwRqwW4ouqdRg8jgbH3aKuCtOc8lxgmYXg2dMM92CRiGP660EtBcymH/eVUpCSaA==} + + import-in-the-middle@2.0.6: + resolution: {integrity: sha512-3vZV3jX0XRFW3EJDTwzWoZa+RH1b8eTTx6YOCjglrLyPuepwoBti1k3L2dKwdCUrnVEfc5CuRuGstaC/uQJJaw==} + + index-to-position@1.2.0: + resolution: {integrity: sha512-Yg7+ztRkqslMAS2iFaU+Oa4KTSidr63OsFGlOrJoW981kIYO3CGCS3wA95P1mUi/IVSJkn0D479KTJpVpvFNuw==} + engines: {node: '>=18'} + + inherits@2.0.4: + resolution: {integrity: sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==} + + ini@1.3.8: + resolution: {integrity: sha512-JV/yugV2uzW5iMRSiZAyDtQd+nxtUnjeLt0acNdw98kKLrvuRVyB80tsREOE7yvGVgalhZ6RNXCmEHkUKBKxew==} + + ip-address@10.1.0: + resolution: {integrity: sha512-XXADHxXmvT9+CRxhXg56LJovE+bmWnEWB78LB83VZTprKTmaC5QfruXocxzTZ2Kl0DNwKuBdlIhjL8LeY8Sf8Q==} + engines: {node: '>= 12'} + + ipaddr.js@1.9.1: + resolution: {integrity: sha512-0KI/607xoxSToH7GjN1FfSbLoU0+btTicjsQSWQlh/hZykN8KpmMf7uYwPW3R+akZ6R/w18ZlXSHBYXiYUPO3g==} + engines: {node: '>= 0.10'} + + is-core-module@2.16.1: + resolution: {integrity: sha512-UfoeMA6fIJ8wTYFEUjelnaGI67v6+N7qXJEvQuIGa99l4xsCruSYOVSQ0uPANn4dAzm8lkYPaKLrrijLq7x23w==} + engines: {node: '>= 0.4'} + + is-docker@3.0.0: + resolution: {integrity: sha512-eljcgEDlEns/7AXFosB5K/2nCM4P7FQPkGc/DWLy5rmFEWvZayGrik1d9/QIY5nJ4f9YsVvBkA6kJpHn9rISdQ==} + engines: {node: ^12.20.0 || ^14.13.1 || >=16.0.0} + hasBin: true + + is-electron@2.2.2: + resolution: {integrity: sha512-FO/Rhvz5tuw4MCWkpMzHFKWD2LsfHzIb7i6MdPYZ/KW7AlxawyLkqdy+jPZP1WubqEADE3O4FUENlJHDfQASRg==} + + is-fullwidth-code-point@3.0.0: + resolution: {integrity: sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==} + engines: {node: '>=8'} + + is-inside-container@1.0.0: + 
resolution: {integrity: sha512-KIYLCCJghfHZxqjYBE7rEy0OBuTd5xCHS7tHVgvCLkx7StIoaxwNW3hCALgEUjFfeRk+MG/Qxmp/vtETEF3tRA==} + engines: {node: '>=14.16'} + hasBin: true + + is-obj@2.0.0: + resolution: {integrity: sha512-drqDG3cbczxxEJRoOXcOjtdp1J/lyp1mNn0xaznRs8+muBhgQcrnbspox5X5fOw0HnMnbfDzvnEMEtqDEJEo8w==} + engines: {node: '>=8'} + + is-plain-obj@4.1.0: + resolution: {integrity: sha512-+Pgi+vMuUNkJyExiMBt5IlFoMyKnr5zhJ4Uspz58WOhBF5QoIZkFyNHIbBAtHwzVAgk5RtndVNsDRN61/mmDqg==} + engines: {node: '>=12'} + + is-promise@4.0.0: + resolution: {integrity: sha512-hvpoI6korhJMnej285dSg6nu1+e6uxs7zG3BYAm5byqDsgJNWwxzM6z6iZiAgQR4TJ30JmBTOwqZUw3WlyH3AQ==} + + is-stream@2.0.1: + resolution: {integrity: sha512-hFoiJiTl63nn+kstHGBtewWSKnQLpyb155KHheA1l39uvtO9nWIop1p3udqPcUd/xbF1VLMO4n7OI6p7RbngDg==} + engines: {node: '>=8'} + + is-stream@4.0.1: + resolution: {integrity: sha512-Dnz92NInDqYckGEUJv689RbRiTSEHCQ7wOVeALbkOz999YpqT46yMRIGtSNl2iCL1waAZSx40+h59NV/EwzV/A==} + engines: {node: '>=18'} + + is-unicode-supported@2.1.0: + resolution: {integrity: sha512-mE00Gnza5EEB3Ds0HfMyllZzbBrmLOX3vfWoj9A9PEnTfratQ/BcaJOuMhnkhjXvb2+FkY3VuHqtAGpTPmglFQ==} + engines: {node: '>=18'} + + is-wsl@3.1.1: + resolution: {integrity: sha512-e6rvdUCiQCAuumZslxRJWR/Doq4VpPR82kqclvcS0efgt430SlGIk05vdCN58+VrzgtIcfNODjozVielycD4Sw==} + engines: {node: '>=16'} + + isexe@2.0.0: + resolution: {integrity: sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==} + + jackspeak@4.2.3: + resolution: {integrity: sha512-ykkVRwrYvFm1nb2AJfKKYPr0emF6IiXDYUaFx4Zn9ZuIH7MrzEZ3sD5RlqGXNRpHtvUHJyOnCEFxOlNDtGo7wg==} + engines: {node: 20 || >=22} + + jose@6.2.1: + resolution: {integrity: sha512-jUaKr1yrbfaImV7R2TN/b3IcZzsw38/chqMpo2XJ7i2F8AfM/lA4G1goC3JVEwg0H7UldTmSt3P68nt31W7/mw==} + + js-tokens@4.0.0: + resolution: {integrity: sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==} + + js-tokens@9.0.1: + resolution: {integrity: 
sha512-mxa9E9ITFOt0ban3j6L5MpjwegGz6lBQmM1IJkWeBZGcMxto50+eWdjC/52xDbS2vy0k7vIMK0Fe2wfL9OQSpQ==} + + js-yaml@4.1.1: + resolution: {integrity: sha512-qQKT4zQxXl8lLwBtHMWwaTcGfFOZviOJet3Oy/xmGk2gZH677CJM9EvtfdSkgWcATZhj/55JZ0rmy3myCT5lsA==} + hasBin: true + + json-bigint@1.0.0: + resolution: {integrity: sha512-SiPv/8VpZuWbvLSMtTDU8hEfrZWg/mH/nV/b4o0CYbSxu1UIQPLdwKOCIyLQX+VIPO5vrLX3i8qtqFyhdPSUSQ==} + + json-schema-traverse@1.0.0: + resolution: {integrity: sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==} + + json-schema-typed@8.0.2: + resolution: {integrity: sha512-fQhoXdcvc3V28x7C7BMs4P5+kNlgUURe2jmUT1T//oBRMDrqy1QPelJimwZGo7Hg9VPV3EQV5Bnq4hbFy2vetA==} + + json-schema@0.4.0: + resolution: {integrity: sha512-es94M3nTIfsEPisRafak+HDLfHXnKBhV3vU5eqPcS3flIWqcxJWgXHXiey3YrpaNsanY5ei1VoYEbOzijuq9BA==} + + jsonfile@6.2.0: + resolution: {integrity: sha512-FGuPw30AdOIUTRMC2OMRtQV+jkVj2cfPqSeWXv1NEAJ1qZ5zb1X6z1mFhbfOB/iy3ssJCD+3KuZ8r8C3uVFlAg==} + + jsonwebtoken@9.0.3: + resolution: {integrity: sha512-MT/xP0CrubFRNLNKvxJ2BYfy53Zkm++5bX9dtuPbqAeQpTVe0MQTFhao8+Cp//EmJp244xt6Drw/GVEGCUj40g==} + engines: {node: '>=12', npm: '>=6'} + + jwa@2.0.1: + resolution: {integrity: sha512-hRF04fqJIP8Abbkq5NKGN0Bbr3JxlQ+qhZufXVr0DvujKy93ZCbXZMHDL4EOtodSbCWxOqR8MS1tXA5hwqCXDg==} + + jws@4.0.1: + resolution: {integrity: sha512-EKI/M/yqPncGUUh44xz0PxSidXFr/+r0pA70+gIYhjv+et7yxM+s29Y+VGDkovRofQem0fs7Uvf4+YmAdyRduA==} + + keytar@7.9.0: + resolution: {integrity: sha512-VPD8mtVtm5JNtA2AErl6Chp06JBfy7diFQ7TQQhdpWOl6MrCRB+eRbvAZUsbGQS9kiMq0coJsy0W0vHpDCkWsQ==} + + keyv@5.6.0: + resolution: {integrity: sha512-CYDD3SOtsHtyXeEORYRx2qBtpDJFjRTGXUtmNEMGyzYOKj1TE3tycdlho7kA1Ufx9OYWZzg52QFBGALTirzDSw==} + + leac@0.6.0: + resolution: {integrity: sha512-y+SqErxb8h7nE/fiEX07jsbuhrpO9lL8eca7/Y1nuWV2moNlXhyd59iDGcRf6moVyDMbmTNzL40SUyrFU/yDpg==} + + liquidjs@10.24.0: + resolution: {integrity: 
sha512-TAUNAdgwaAXjjcUFuYVJm9kOVH7zc0mTKxsG9t9Lu4qdWjB2BEblyVIYpjWcmJLMGgiYqnGNJjpNMHx0gp/46A==} + engines: {node: '>=16'} + hasBin: true + + lodash.camelcase@4.3.0: + resolution: {integrity: sha512-TwuEnCnxbc3rAvhf/LbG7tJUDzhqXyFnv3dtzLOPgCG/hODL7WFnsbwktkD7yUV0RrreP/l1PALq/YSg6VvjlA==} + + lodash.includes@4.3.0: + resolution: {integrity: sha512-W3Bx6mdkRTGtlJISOvVD/lbqjTlPPUDTMnlXZFnVwi9NKJ6tiAk6LVdlhZMm17VZisqhKcgzpO5Wz91PCt5b0w==} + + lodash.isboolean@3.0.3: + resolution: {integrity: sha512-Bz5mupy2SVbPHURB98VAcw+aHh4vRV5IPNhILUCsOzRmsTmSQ17jIuqopAentWoehktxGd9e/hbIXq980/1QJg==} + + lodash.isinteger@4.0.4: + resolution: {integrity: sha512-DBwtEWN2caHQ9/imiNeEA5ys1JoRtRfY3d7V9wkqtbycnAmTvRRmbHKDV4a0EYc678/dia0jrte4tjYwVBaZUA==} + + lodash.isnumber@3.0.3: + resolution: {integrity: sha512-QYqzpfwO3/CWf3XP+Z+tkQsfaLL/EnUlXWVkIk5FUPc4sBdTehEqZONuyRt2P67PXAk+NXmTBcc97zw9t1FQrw==} + + lodash.isplainobject@4.0.6: + resolution: {integrity: sha512-oSXzaWypCMHkPC3NvBEaPHf0KsA5mvPrOPgQWDsbg8n7orZ290M0BmC/jgRZ4vcJ6DTAhjrsSYgdsW/F+MFOBA==} + + lodash.isstring@4.0.1: + resolution: {integrity: sha512-0wJxfxH1wgO3GrbuP+dTTk7op+6L41QCXbGINEmD+ny/G/eCqGzxyCsh7159S+mgDDcoarnBw6PC1PS5+wUGgw==} + + lodash.once@4.1.1: + resolution: {integrity: sha512-Sb487aTOCr9drQVL8pIxOzVhafOjZN9UU54hiN8PU3uAiSV7lx1yYNpbNmex2PK6dSJoNTSJUUswT651yww3Mg==} + + long@5.3.2: + resolution: {integrity: sha512-mNAgZ1GmyNhD7AuqnTG3/VQ26o760+ZYBPKjPvugO8+nLbYfX6TVpJPseBvopbdY+qpZ/lKUnmEc1LeZYS3QAA==} + + loupe@3.2.1: + resolution: {integrity: sha512-CdzqowRJCeLU72bHvWqwRBBlLcMEtIvGrlvef74kMnV2AolS9Y8xUv1I0U/MNAWMhBlKIoyuEgoJ0t/bbwHbLQ==} + + lowercase-keys@3.0.0: + resolution: {integrity: sha512-ozCC6gdQ+glXOQsveKD0YsDy8DSQFjDTz4zyzEHNV5+JP5D62LmfDZ6o1cycFx9ouG940M5dE8C8CTewdj2YWQ==} + engines: {node: ^12.20.0 || ^14.13.1 || >=16.0.0} + + lru-cache@10.4.3: + resolution: {integrity: sha512-JNAzZcXrCt42VGLuYz0zfAzDfAvJWW6AfYlDBQyDV5DClI2m5sAmK+OIO7s59XfsRsWHp02jAJrRadPRGTt6SQ==} + + lru-cache@11.2.7: + 
resolution: {integrity: sha512-aY/R+aEsRelme17KGQa/1ZSIpLpNYYrhcrepKTZgE+W3WM16YMCaPwOHLHsmopZHELU0Ojin1lPVxKR0MihncA==} + engines: {node: 20 || >=22} + + magic-string@0.30.21: + resolution: {integrity: sha512-vd2F4YUyEXKGcLHoq+TEyCjxueSeHnFxyyjNp80yg0XV4vUhnDer/lvvlqM/arB5bXQN5K2/3oinyCRyx8T2CQ==} + + marked@15.0.12: + resolution: {integrity: sha512-8dD6FusOQSrpv9Z1rdNMdlSgQOIP880DHqnohobOmYLElGEqAL/JvxvuxZO16r4HtjTlfPRDC1hbvxC9dPN2nA==} + engines: {node: '>= 18'} + hasBin: true + + math-intrinsics@1.1.0: + resolution: {integrity: sha512-/IXtbwEk5HTPyEwyKX6hGkYXxM9nbj64B+ilVJnC/R6B0pH5G4V3b0pVbL7DBj4tkhBAppbQUlf6F6Xl9LHu1g==} + engines: {node: '>= 0.4'} + + media-typer@1.1.0: + resolution: {integrity: sha512-aisnrDP4GNe06UcKFnV5bfMNPBUw4jsLGaWwWfnH3v02GnBuXX2MCVn5RbrWo0j3pczUilYblq7fQ7Nw2t5XKw==} + engines: {node: '>= 0.8'} + + merge-descriptors@2.0.0: + resolution: {integrity: sha512-Snk314V5ayFLhp3fkUREub6WtjBfPdCPY1Ln8/8munuLuiYhsABgBVWsozAG+MWMbVEvcdcpbi9R7ww22l9Q3g==} + engines: {node: '>=18'} + + mime-db@1.52.0: + resolution: {integrity: sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==} + engines: {node: '>= 0.6'} + + mime-db@1.54.0: + resolution: {integrity: sha512-aU5EJuIN2WDemCcAp2vFBfp/m4EAhWJnUNSSw0ixs7/kXbd6Pg64EmwJkNdFhB8aWt1sH2CTXrLxo/iAGV3oPQ==} + engines: {node: '>= 0.6'} + + mime-types@2.1.35: + resolution: {integrity: sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==} + engines: {node: '>= 0.6'} + + mime-types@3.0.2: + resolution: {integrity: sha512-Lbgzdk0h4juoQ9fCKXW4by0UJqj+nOOrI9MJ1sSj4nI8aI2eo1qmvQEie4VD1glsS250n15LsWsYtCugiStS5A==} + engines: {node: '>=18'} + + mime@4.0.7: + resolution: {integrity: sha512-2OfDPL+e03E0LrXaGYOtTFIYhiuzep94NSsuhrNULq+stylcJedcHdzHtz0atMUuGwJfFYs0YL5xeC/Ca2x0eQ==} + engines: {node: '>=16'} + hasBin: true + + mimic-response@3.1.0: + resolution: {integrity: 
sha512-z0yWI+4FDrrweS8Zmt4Ej5HdJmky15+L2e6Wgn3+iK5fWzb6T3fhNFq2+MeTRb064c6Wr4N/wv0DzQTjNzHNGQ==} + engines: {node: '>=10'} + + mimic-response@4.0.0: + resolution: {integrity: sha512-e5ISH9xMYU0DzrT+jl8q2ze9D6eWBto+I8CNpe+VI+K2J/F/k3PdkdTdz4wvGVH4NTpo+NRYTVIuMQEMMcsLqg==} + engines: {node: ^12.20.0 || ^14.13.1 || >=16.0.0} + + minimatch@10.2.4: + resolution: {integrity: sha512-oRjTw/97aTBN0RHbYCdtF1MQfvusSIBQM0IZEgzl6426+8jSC0nF1a/GmnVLpfB9yyr6g6FTqWqiZVbxrtaCIg==} + engines: {node: 18 || 20 || >=22} + + minimist@1.2.8: + resolution: {integrity: sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA==} + + minipass@7.1.3: + resolution: {integrity: sha512-tEBHqDnIoM/1rXME1zgka9g6Q2lcoCkxHLuc7ODJ5BxbP5d4c2Z5cGgtXAku59200Cx7diuHTOYfSBD8n6mm8A==} + engines: {node: '>=16 || 14 >=14.17'} + + mkdirp-classic@0.5.3: + resolution: {integrity: sha512-gKLcREMhtuZRwRAfqP3RFW+TK4JqApVBtOIftVgjuABpAtpxhPGaDcfvbhNvD0B8iD1oUr/txX35NjcaY6Ns/A==} + + mnemonist@0.40.3: + resolution: {integrity: sha512-Vjyr90sJ23CKKH/qPAgUKicw/v6pRoamxIEDFOF8uSgFME7DqPRpHgRTejWVjkdGg5dXj0/NyxZHZ9bcjH+2uQ==} + + module-details-from-path@1.0.4: + resolution: {integrity: sha512-EGWKgxALGMgzvxYF1UyGTy0HXX/2vHLkw6+NvDKW2jypWbHpjQuj4UMcqQWXHERJhVGKikolT06G3bcKe4fi7w==} + + ms@2.1.3: + resolution: {integrity: sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==} + + nanoid@3.3.11: + resolution: {integrity: sha512-N8SpfPUnUp1bK+PMYW8qSWdl9U+wwNWI4QKxOYDy9JAro3WMX7p2OeVRF9v+347pnakNevPmiHhNmZ2HbFA76w==} + engines: {node: ^10 || ^12 || ^13.7 || ^14 || >=15.0.1} + hasBin: true + + napi-build-utils@2.0.0: + resolution: {integrity: sha512-GEbrYkbfF7MoNaoh2iGG84Mnf/WZfB0GdGEsM8wz7Expx/LlWf5U8t9nvJKXSp3qr5IsEbK04cBGhol/KwOsWA==} + + negotiator@1.0.0: + resolution: {integrity: sha512-8Ofs/AUQh8MaEcrlq5xOX0CQ9ypTF5dl78mjlMNfOK08fzpgTHQRQPBxcPlEtIw0yRpws+Zo/3r+5WRby7u3Gg==} + engines: {node: '>= 0.6'} + + node-abi@3.89.0: + resolution: 
{integrity: sha512-6u9UwL0HlAl21+agMN3YAMXcKByMqwGx+pq+P76vii5f7hTPtKDp08/H9py6DY+cfDw7kQNTGEj/rly3IgbNQA==} + engines: {node: '>=10'} + + node-addon-api@4.3.0: + resolution: {integrity: sha512-73sE9+3UaLYYFmDsFZnqCInzPyh3MqIwZO9cw58yIqAZhONrrabrYyYe3TuIqtIiOuTXVhsGau8hcrhhwSsDIQ==} + + node-addon-api@7.1.1: + resolution: {integrity: sha512-5m3bsyrjFWE1xf7nz7YXdN4udnVtXK6/Yfgn5qnahL6bCkf2yKt4k3nuTKAtT4r3IG8JNR2ncsIMdZuAzJjHQQ==} + + node-addon-api@8.6.0: + resolution: {integrity: sha512-gBVjCaqDlRUk0EwoPNKzIr9KkS9041G/q31IBShPs1Xz6UTA+EXdZADbzqAJQrpDRq71CIMnOP5VMut3SL0z5Q==} + engines: {node: ^18 || ^20 || >= 21} + + node-domexception@1.0.0: + resolution: {integrity: sha512-/jKZoMpw0F8GRwl4/eLROPA3cfcXtLApP0QzLmUT/HuPCZWyB7IY9ZrMeKw2O/nFIqPQB3PVM9aYm0F312AXDQ==} + engines: {node: '>=10.5.0'} + deprecated: Use your platform's native DOMException instead + + node-fetch@2.7.0: + resolution: {integrity: sha512-c4FRfUm/dbcWZ7U+1Wq0AwCyFL+3nt2bEw05wfxSz+DWpWsitgmSgYmy2dQdWyKC1694ELPqMs/YzUSNozLt8A==} + engines: {node: 4.x || >=6.0.0} + peerDependencies: + encoding: ^0.1.0 + peerDependenciesMeta: + encoding: + optional: true + + node-fetch@3.3.2: + resolution: {integrity: sha512-dRB78srN/l6gqWulah9SrxeYnxeddIG30+GOqK/9OlLVyLg3HPnr6SqOWTWOXKRwC2eGYCkZ59NNuSgvSrpgOA==} + engines: {node: ^12.20.0 || ^14.13.1 || >=16.0.0} + + node-gyp-build@4.8.4: + resolution: {integrity: sha512-LA4ZjwlnUblHVgq0oBF3Jl/6h/Nvs5fzBLwdEF4nuxnFdsfajde4WfxtJr3CaiH+F6ewcIB/q4jQ4UzPyid+CQ==} + hasBin: true + + node-pty@1.1.0: + resolution: {integrity: sha512-20JqtutY6JPXTUnL0ij1uad7Qe1baT46lyolh2sSENDd4sTzKZ4nmAFkeAARDKwmlLjPx6XKRlwRUxwjOy+lUg==} + + normalize-package-data@6.0.2: + resolution: {integrity: sha512-V6gygoYb/5EmNI+MEGrWkC+e6+Rr7mTmfHrxDbLzxQogBkgzo76rkok0Am6thgSF7Mv2nLOajAJj5vDJZEFn7g==} + engines: {node: ^16.14.0 || >=18.0.0} + + normalize-url@8.1.1: + resolution: {integrity: sha512-JYc0DPlpGWB40kH5g07gGTrYuMqV653k3uBKY6uITPWds3M0ov3GaWGp9lbE3Bzngx8+XkfzgvASb9vk9JDFXQ==} + engines: 
{node: '>=14.16'} + + npm-run-path@6.0.0: + resolution: {integrity: sha512-9qny7Z9DsQU8Ou39ERsPU4OZQlSTP47ShQzuKZ6PRXpYLtIFgl/DEBYEXKlvcEa+9tHVcK8CF81Y2V72qaZhWA==} + engines: {node: '>=18'} + + object-assign@4.1.1: + resolution: {integrity: sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg==} + engines: {node: '>=0.10.0'} + + object-hash@3.0.0: + resolution: {integrity: sha512-RSn9F68PjH9HqtltsSnqYC1XXoWe9Bju5+213R98cNGttag9q9yAOTzdbsqvIa7aNm5WffBZFpWYr2aWrklWAw==} + engines: {node: '>= 6'} + + object-inspect@1.13.4: + resolution: {integrity: sha512-W67iLl4J2EXEGTbfeHCffrjDfitvLANg0UlX3wFUUSTx92KXRFegMHUVgSqE+wvhAbi4WqjGg9czysTV2Epbew==} + engines: {node: '>= 0.4'} + + obliterator@2.0.5: + resolution: {integrity: sha512-42CPE9AhahZRsMNslczq0ctAEtqk8Eka26QofnqC346BZdHDySk3LWka23LI7ULIw11NmltpiLagIq8gBozxTw==} + + on-finished@2.4.1: + resolution: {integrity: sha512-oVlzkg3ENAhCk2zdv7IJwd/QUD4z2RxRwpkcGY8psCVcCYZNq4wYnVWALHM+brtuJjePWiYF/ClmuDr8Ch5+kg==} + engines: {node: '>= 0.8'} + + once@1.4.0: + resolution: {integrity: sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==} + + open@10.2.0: + resolution: {integrity: sha512-YgBpdJHPyQ2UE5x+hlSXcnejzAvD0b22U2OuAP+8OnlJT+PjWPxtgmGqKKc+RgTM63U9gN0YzrYc71R2WT/hTA==} + engines: {node: '>=18'} + + p-cancelable@4.0.1: + resolution: {integrity: sha512-wBowNApzd45EIKdO1LaU+LrMBwAcjfPaYtVzV3lmfM3gf8Z4CHZsiIqlM8TZZ8okYvh5A1cP6gTfCRQtwUpaUg==} + engines: {node: '>=14.16'} + + p-finally@1.0.0: + resolution: {integrity: sha512-LICb2p9CB7FS+0eR1oqWnHhp0FljGLZCWBE9aix0Uye9W8LTQPwMTYVGWQWIw9RdQiDg4+epXQODwIYJtSJaow==} + engines: {node: '>=4'} + + p-queue@6.6.2: + resolution: {integrity: sha512-RwFpb72c/BhQLEXIZ5K2e+AhgNVmIejGlTgiB9MzZ0e93GRvqZ7uSi0dvRF7/XIXDeNkra2fNHBxTyPDGySpjQ==} + engines: {node: '>=8'} + + p-retry@4.6.2: + resolution: {integrity: sha512-312Id396EbJdvRONlngUx0NydfrIQ5lsYu0znKVUzVvArzEIt08V1qhtyESbGVd1FGX7UKtiFp5uwKZdM8wIuQ==} + 
engines: {node: '>=8'} + + p-timeout@3.2.0: + resolution: {integrity: sha512-rhIwUycgwwKcP9yTOOFK/AKsAopjjCakVqLHePO3CC6Mir1Z99xT+R63jZxAT5lFZLa2inS5h+ZS2GvR99/FBg==} + engines: {node: '>=8'} + + package-json-from-dist@1.0.1: + resolution: {integrity: sha512-UEZIS3/by4OC8vL3P2dTXRETpebLI2NiI5vIrjaD/5UtrkFX/tNbwjTSRAGC/+7CAo2pIcBaRgWmcBBHcsaCIw==} + + parse-json@8.3.0: + resolution: {integrity: sha512-ybiGyvspI+fAoRQbIPRddCcSTV9/LsJbf0e/S85VLowVGzRmokfneg2kwVW/KU5rOXrPSbF1qAKPMgNTqqROQQ==} + engines: {node: '>=18'} + + parse-ms@4.0.0: + resolution: {integrity: sha512-TXfryirbmq34y8QBwgqCVLi+8oA3oWx2eAnSn62ITyEhEYaWRlVZ2DvMM9eZbMs/RfxPu/PK/aBLyGj4IrqMHw==} + engines: {node: '>=18'} + + parseley@0.12.1: + resolution: {integrity: sha512-e6qHKe3a9HWr0oMRVDTRhKce+bRO8VGQR3NyVwcjwrbhMmFCX9KszEV35+rn4AdilFAq9VPxP/Fe1wC9Qjd2lw==} + + parseurl@1.3.3: + resolution: {integrity: sha512-CiyeOxFT/JZyN5m0z9PfXw4SCBJ6Sygz1Dpl0wqjlhDEGGBP1GnsUVEL0p63hoG1fcj3fHynXi9NYO4nWOL+qQ==} + engines: {node: '>= 0.8'} + + path-exists@5.0.0: + resolution: {integrity: sha512-RjhtfwJOxzcFmNOi6ltcbcu4Iu+FL3zEj83dk4kAS+fVpTxXLO1b38RvJgT/0QwvV/L3aY9TAnyv0EOqW4GoMQ==} + engines: {node: ^12.20.0 || ^14.13.1 || >=16.0.0} + + path-key@3.1.1: + resolution: {integrity: sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==} + engines: {node: '>=8'} + + path-key@4.0.0: + resolution: {integrity: sha512-haREypq7xkM7ErfgIyA0z+Bj4AGKlMSdlQE2jvJo6huWD1EdkKYV+G/T4nq0YEF2vgTT8kqMFKo1uHn950r4SQ==} + engines: {node: '>=12'} + + path-parse@1.0.7: + resolution: {integrity: sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==} + + path-scurry@2.0.2: + resolution: {integrity: sha512-3O/iVVsJAPsOnpwWIeD+d6z/7PmqApyQePUtCndjatj/9I5LylHvt5qluFaBT3I5h3r1ejfR056c+FCv+NnNXg==} + engines: {node: 18 || 20 || >=22} + + path-to-regexp@8.3.0: + resolution: {integrity: 
sha512-7jdwVIRtsP8MYpdXSwOS0YdD0Du+qOoF/AEPIt88PcCFrZCzx41oxku1jD88hZBwbNUIEfpqvuhjFaMAqMTWnA==} + + pathe@2.0.3: + resolution: {integrity: sha512-WUjGcAqP1gQacoQe+OBJsFA7Ld4DyXuUIjZ5cc75cLHvJ7dtNsTugphxIADwspS+AraAUePCKrSVtPLFj/F88w==} + + pathval@2.0.1: + resolution: {integrity: sha512-//nshmD55c46FuFw26xV/xFAaB5HF9Xdap7HJBBnrKdAd6/GxDBaNA1870O79+9ueg61cZLSVc+OaFlfmObYVQ==} + engines: {node: '>= 14.16'} + + peberminta@0.9.0: + resolution: {integrity: sha512-XIxfHpEuSJbITd1H3EeQwpcZbTLHc+VVr8ANI9t5sit565tsI4/xK3KWTUFE2e6QiangUkh3B0jihzmGnNrRsQ==} + + pend@1.2.0: + resolution: {integrity: sha512-F3asv42UuXchdzt+xXqfW1OGlVBe+mxa2mqI0pg5yAHZPvFmY3Y6drSf/GQ1A86WgWEN9Kzh/WrgKa6iGcHXLg==} + + picocolors@1.1.1: + resolution: {integrity: sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA==} + + picomatch@4.0.3: + resolution: {integrity: sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==} + engines: {node: '>=12'} + + pkce-challenge@5.0.1: + resolution: {integrity: sha512-wQ0b/W4Fr01qtpHlqSqspcj3EhBvimsdh0KlHhH8HRZnMsEa0ea2fTULOXOS9ccQr3om+GcGRk4e+isrZWV8qQ==} + engines: {node: '>=16.20.0'} + + postcss@8.5.8: + resolution: {integrity: sha512-OW/rX8O/jXnm82Ey1k44pObPtdblfiuWnrd8X7GJ7emImCOstunGbXUpp7HdBrFQX6rJzn3sPT397Wp5aCwCHg==} + engines: {node: ^10 || ^12 || >=14} + + prebuild-install@7.1.3: + resolution: {integrity: sha512-8Mf2cbV7x1cXPUILADGI3wuhfqWvtiLA1iclTDbFRZkgRQS0NqsPZphna9V+HyTEadheuPmjaJMsbzKQFOzLug==} + engines: {node: '>=10'} + deprecated: No longer maintained. Please contact the author of the relevant native addon; alternatives are available. 
+ hasBin: true + + pretty-ms@9.3.0: + resolution: {integrity: sha512-gjVS5hOP+M3wMm5nmNOucbIrqudzs9v/57bWRHQWLYklXqoXKrVfYW2W9+glfGsqtPgpiz5WwyEEB+ksXIx3gQ==} + engines: {node: '>=18'} + + proper-lockfile@4.1.2: + resolution: {integrity: sha512-TjNPblN4BwAWMXU8s9AEz4JmQxnD1NNL7bNOY/AKUzyamc379FWASUhc/K1pL2noVb+XmZKLL68cjzLsiOAMaA==} + + proto3-json-serializer@2.0.2: + resolution: {integrity: sha512-SAzp/O4Yh02jGdRc+uIrGoe87dkN/XtwxfZ4ZyafJHymd79ozp5VG5nyZ7ygqPM5+cpLDjjGnYFUkngonyDPOQ==} + engines: {node: '>=14.0.0'} + + protobufjs@7.5.4: + resolution: {integrity: sha512-CvexbZtbov6jW2eXAvLukXjXUW1TzFaivC46BpWc/3BpcCysb5Vffu+B3XHMm8lVEuy2Mm4XGex8hBSg1yapPg==} + engines: {node: '>=12.0.0'} + + protobufjs@8.0.0: + resolution: {integrity: sha512-jx6+sE9h/UryaCZhsJWbJtTEy47yXoGNYI4z8ZaRncM0zBKeRqjO2JEcOUYwrYGb1WLhXM1FfMzW3annvFv0rw==} + engines: {node: '>=12.0.0'} + + proxy-addr@2.0.7: + resolution: {integrity: sha512-llQsMLSUDUPT44jdrU/O37qlnifitDP+ZwrmmZcoSKyLKvtZxpyV0n2/bD/N4tBAAZ/gJEdZU7KMraoK1+XYAg==} + engines: {node: '>= 0.10'} + + proxy-from-env@1.1.0: + resolution: {integrity: sha512-D+zkORCbA9f1tdWRK0RaCR3GPv50cMxcrz4X8k5LTSUD1Dkw47mKJEZQNunItRTkWwgtaUSo1RVFRIG9ZXiFYg==} + + pump@3.0.4: + resolution: {integrity: sha512-VS7sjc6KR7e1ukRFhQSY5LM2uBWAUPiOPa/A3mkKmiMwSmRFUITt0xuj+/lesgnCv+dPIEYlkzrcyXgquIHMcA==} + + pumpify@2.0.1: + resolution: {integrity: sha512-m7KOje7jZxrmutanlkS1daj1dS6z6BgslzOXmcSEpIlCxM3VJH7lG5QLeck/6hgF6F4crFf01UtQmNsJfweTAw==} + + qs@6.15.0: + resolution: {integrity: sha512-mAZTtNCeetKMH+pSjrb76NAM8V9a05I9aBZOHztWy/UqcJdQYNsf59vrRKWnojAT9Y+GbIvoTBC++CPHqpDBhQ==} + engines: {node: '>=0.6'} + + quick-lru@5.1.1: + resolution: {integrity: sha512-WuyALRjWPDGtt/wzJiadO5AXY+8hZ80hVpe6MyivgraREW751X3SbhRvG3eLKOYN+8VEvqLcf3wdnt44Z4S4SA==} + engines: {node: '>=10'} + + range-parser@1.2.1: + resolution: {integrity: sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg==} + engines: {node: '>= 0.6'} + + 
raw-body@3.0.2: + resolution: {integrity: sha512-K5zQjDllxWkf7Z5xJdV0/B0WTNqx6vxG70zJE4N0kBs4LovmEYWJzQGxC9bS9RAKu3bgM40lrd5zoLJ12MQ5BA==} + engines: {node: '>= 0.10'} + + rc@1.2.8: + resolution: {integrity: sha512-y3bGgqKj3QBdxLbLkomlohkvsA8gdAiUQlSBJnBhfn+BPxg4bc62d8TcBW15wavDfgexCgccckhcZvywyQYPOw==} + hasBin: true + + read-package-up@11.0.0: + resolution: {integrity: sha512-MbgfoNPANMdb4oRBNg5eqLbB2t2r+o5Ua1pNt8BqGp4I0FJZhuVSOj3PaBPni4azWuSzEdNn2evevzVmEk1ohQ==} + engines: {node: '>=18'} + + read-pkg@9.0.1: + resolution: {integrity: sha512-9viLL4/n1BJUCT1NXVTdS1jtm80yDEgR5T4yCelII49Mbj0v1rZdKqj7zCiYdbB0CuCgdrvHcNogAKTFPBocFA==} + engines: {node: '>=18'} + + readable-stream@3.6.2: + resolution: {integrity: sha512-9u/sniCrY3D5WdsERHzHE4G2YCXqoG5FTHUiCC4SIbr6XcLZBY05ya9EKjYek9O5xOAwjGq+1JdGBAS7Q9ScoA==} + engines: {node: '>= 6'} + + require-directory@2.1.1: + resolution: {integrity: sha512-fGxEI7+wsG9xrvdjsrlmL22OMTTiHRwAMroiEeMgq8gzoLC/PQr7RsRDSTLUg/bZAZtF+TVIkHc6/4RIKrui+Q==} + engines: {node: '>=0.10.0'} + + require-from-string@2.0.2: + resolution: {integrity: sha512-Xf0nWe6RseziFMu+Ap9biiUbmplq6S9/p+7w7YXP/JBHhrUDDUhwa+vANyubuqfZWTveU//DYVGsDG7RKL/vEw==} + engines: {node: '>=0.10.0'} + + require-in-the-middle@7.5.2: + resolution: {integrity: sha512-gAZ+kLqBdHarXB64XpAe2VCjB7rIRv+mU8tfRWziHRJ5umKsIHN2tLLv6EtMw7WCdP19S0ERVMldNvxYCHnhSQ==} + engines: {node: '>=8.6.0'} + + require-in-the-middle@8.0.1: + resolution: {integrity: sha512-QT7FVMXfWOYFbeRBF6nu+I6tr2Tf3u0q8RIEjNob/heKY/nh7drD/k7eeMFmSQgnTtCzLDcCu/XEnpW2wk4xCQ==} + engines: {node: '>=9.3.0 || >=8.10.0 <9.0.0'} + + resolve-alpn@1.2.1: + resolution: {integrity: sha512-0a1F4l73/ZFZOakJnQ3FvkJ2+gSTQWz/r2KE5OdDY0TxPm5h4GkqkWWfM47T7HsbnOtcJVEF4epCVy6u7Q3K+g==} + + resolve@1.22.11: + resolution: {integrity: sha512-RfqAvLnMl313r7c9oclB1HhUEAezcpLjz95wFH4LVuhk9JF/r22qmVP9AMmOU4vMX7Q8pN8jwNg/CSpdFnMjTQ==} + engines: {node: '>= 0.4'} + hasBin: true + + responselike@4.0.2: + resolution: {integrity: 
sha512-cGk8IbWEAnaCpdAt1BHzJ3Ahz5ewDJa0KseTsE3qIRMJ3C698W8psM7byCeWVpd/Ha7FUYzuRVzXoKoM6nRUbA==} + engines: {node: '>=20'} + + retry-request@7.0.2: + resolution: {integrity: sha512-dUOvLMJ0/JJYEn8NrpOaGNE7X3vpI5XlZS/u0ANjqtcZVKnIxP7IgCFwrKTxENw29emmwug53awKtaMm4i9g5w==} + engines: {node: '>=14'} + + retry@0.12.0: + resolution: {integrity: sha512-9LkiTwjUh6rT555DtE9rTX+BKByPfrMzEAtnlEtdEwr3Nkffwiihqe2bWADg+OQRjt9gl6ICdmB/ZFDCGAtSow==} + engines: {node: '>= 4'} + + retry@0.13.1: + resolution: {integrity: sha512-XQBQ3I8W1Cge0Seh+6gjj03LbmRFWuoszgK9ooCpwYIrhhoO80pfq4cUkU5DkknwfOfFteRwlZ56PYOGYyFWdg==} + engines: {node: '>= 4'} + + rollup@4.59.0: + resolution: {integrity: sha512-2oMpl67a3zCH9H79LeMcbDhXW/UmWG/y2zuqnF2jQq5uq9TbM9TVyXvA4+t+ne2IIkBdrLpAaRQAvo7YI/Yyeg==} + engines: {node: '>=18.0.0', npm: '>=8.0.0'} + hasBin: true + + router@2.2.0: + resolution: {integrity: sha512-nLTrUKm2UyiL7rlhapu/Zl45FwNgkZGaCpZbIHajDYgwlJCOzLSk+cIPAnsEqV955GjILJnKbdQC1nVPz+gAYQ==} + engines: {node: '>= 18'} + + run-applescript@7.1.0: + resolution: {integrity: sha512-DPe5pVFaAsinSaV6QjQ6gdiedWDcRCbUuiQfQa2wmWV7+xC9bGulGI8+TdRmoFkAPaBXk8CrAbnlY2ISniJ47Q==} + engines: {node: '>=18'} + + safe-buffer@5.2.1: + resolution: {integrity: sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==} + + safer-buffer@2.1.2: + resolution: {integrity: sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==} + + selderee@0.11.0: + resolution: {integrity: sha512-5TF+l7p4+OsnP8BCCvSyZiSPc4x4//p5uPwK8TCnVPJYRmU2aYKMpOXvw8zM5a5JvuuCGN1jmsMwuU2W02ukfA==} + + semver@7.7.4: + resolution: {integrity: sha512-vFKC2IEtQnVhpT78h1Yp8wzwrf8CM+MzKMHGJZfBtzhZNycRFnXsHk6E5TxIkkMsgNS7mdX3AGB7x2QM2di4lA==} + engines: {node: '>=10'} + hasBin: true + + send@1.2.1: + resolution: {integrity: sha512-1gnZf7DFcoIcajTjTwjwuDjzuz4PPcY2StKPlsGAQ1+YH20IRVrBaXSWmdjowTJ6u8Rc01PoYOGHXfP1mYcZNQ==} + engines: {node: '>= 18'} + + serve-static@2.2.1: + 
resolution: {integrity: sha512-xRXBn0pPqQTVQiC8wyQrKs2MOlX24zQ0POGaj0kultvoOCstBQM5yvOhAVSUwOMjQtTvsPWoNCHfPGwaaQJhTw==} + engines: {node: '>= 18'} + + setprototypeof@1.2.0: + resolution: {integrity: sha512-E5LDX7Wrp85Kil5bhZv46j8jOeboKq5JMmYM3gVGdGH8xFpPWXUMsNrlODCrkoxMEeNi/XZIwuRvY4XNwYMJpw==} + + shebang-command@2.0.0: + resolution: {integrity: sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==} + engines: {node: '>=8'} + + shebang-regex@3.0.0: + resolution: {integrity: sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==} + engines: {node: '>=8'} + + shell-quote@1.8.3: + resolution: {integrity: sha512-ObmnIF4hXNg1BqhnHmgbDETF8dLPCggZWBjkQfhZpbszZnYur5DUljTcCHii5LC3J5E0yeO/1LIMyH+UvHQgyw==} + engines: {node: '>= 0.4'} + + side-channel-list@1.0.0: + resolution: {integrity: sha512-FCLHtRD/gnpCiCHEiJLOwdmFP+wzCmDEkc9y7NsYxeF4u7Btsn1ZuwgwJGxImImHicJArLP4R0yX4c2KCrMrTA==} + engines: {node: '>= 0.4'} + + side-channel-map@1.0.1: + resolution: {integrity: sha512-VCjCNfgMsby3tTdo02nbjtM/ewra6jPHmpThenkTYh8pG9ucZ/1P8So4u4FGBek/BjpOVsDCMoLA/iuBKIFXRA==} + engines: {node: '>= 0.4'} + + side-channel-weakmap@1.0.2: + resolution: {integrity: sha512-WPS/HvHQTYnHisLo9McqBHOJk2FkHO/tlpvldyrnem4aeQp4hai3gythswg6p01oSoTl58rcpiFAjF2br2Ak2A==} + engines: {node: '>= 0.4'} + + side-channel@1.1.0: + resolution: {integrity: sha512-ZX99e6tRweoUXqR+VBrslhda51Nh5MTQwou5tnUDgbtyM0dBgmhEDtWGP/xbKn6hqfPRHujUNwz5fy/wbbhnpw==} + engines: {node: '>= 0.4'} + + siginfo@2.0.0: + resolution: {integrity: sha512-ybx0WO1/8bSBLEWXZvEd7gMW3Sn3JFlW3TvX1nREbDLRNQNaeNN8WK0meBwPdAaOI7TtRRRJn/Es1zhrrCHu7g==} + + signal-exit@3.0.7: + resolution: {integrity: sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ==} + + signal-exit@4.1.0: + resolution: {integrity: sha512-bzyZ1e88w9O1iNJbKnOlvYTrWPDl46O1bG0D3XInv+9tkPrxrN8jUUTiFlDkkmKWgn1M6CfIA13SuGqOa9Korw==} + engines: {node: '>=14'} + + 
simple-concat@1.0.1: + resolution: {integrity: sha512-cSFtAPtRhljv69IK0hTVZQ+OfE9nePi/rtJmw5UjHeVyVroEqJXP1sFztKUy1qU+xvz3u/sfYJLa947b7nAN2Q==} + + simple-get@4.0.1: + resolution: {integrity: sha512-brv7p5WgH0jmQJr1ZDDfKDOSeWWg+OVypG99A/5vYGPqJ6pxiaHLy8nxtFjBA7oMa01ebA9gfh1uMCFqOuXxvA==} + + simple-git@3.33.0: + resolution: {integrity: sha512-D4V/tGC2sjsoNhoMybKyGoE+v8A60hRawKQ1iFRA1zwuDgGZCBJ4ByOzZ5J8joBbi4Oam0qiPH+GhzmSBwbJng==} + + source-map-js@1.2.1: + resolution: {integrity: sha512-UXWMKhLOwVKb728IUtQPXxfYU+usdybtUrK/8uGE8CQMvrhOpwvzDBwj0QhSL7MQc7vIsISBG8VQ8+IDQxpfQA==} + engines: {node: '>=0.10.0'} + + spdx-correct@3.2.0: + resolution: {integrity: sha512-kN9dJbvnySHULIluDHy32WHRUu3Og7B9sbY7tsFLctQkIqnMh3hErYgdMjTYuqmcXX+lK5T1lnUt3G7zNswmZA==} + + spdx-exceptions@2.5.0: + resolution: {integrity: sha512-PiU42r+xO4UbUS1buo3LPJkjlO7430Xn5SVAhdpzzsPHsjbYVflnnFdATgabnLude+Cqu25p6N+g2lw/PFsa4w==} + + spdx-expression-parse@3.0.1: + resolution: {integrity: sha512-cbqHunsQWnJNE6KhVSMsMeH5H/L9EpymbzqTQ3uLwNCLZ1Q481oWaofqH7nO6V07xlXwY6PhQdQ2IedWx/ZK4Q==} + + spdx-license-ids@3.0.23: + resolution: {integrity: sha512-CWLcCCH7VLu13TgOH+r8p1O/Znwhqv/dbb6lqWy67G+pT1kHmeD/+V36AVb/vq8QMIQwVShJ6Ssl5FPh0fuSdw==} + + stackback@0.0.2: + resolution: {integrity: sha512-1XMJE5fQo1jGH6Y/7ebnwPOBEkIEnT4QF32d5R1+VXdXveM0IBMJt8zfaxX1P3QhVwrYe+576+jkANtSS2mBbw==} + + statuses@2.0.2: + resolution: {integrity: sha512-DvEy55V3DB7uknRo+4iOGT5fP1slR8wQohVdknigZPMpMstaKJQWhwiYBACJE3Ul2pTnATihhBYnRhZQHGBiRw==} + engines: {node: '>= 0.8'} + + std-env@3.10.0: + resolution: {integrity: sha512-5GS12FdOZNliM5mAOxFRg7Ir0pWz8MdpYm6AY6VPkGpbA7ZzmbzNcBJQ0GPvvyWgcY7QAhCgf9Uy89I03faLkg==} + + stream-events@1.0.5: + resolution: {integrity: sha512-E1GUzBSgvct8Jsb3v2X15pjzN1tYebtbLaMg+eBOUOAxgbLoSbT2NS91ckc5lJD1KfLjId+jXJRgo0qnV5Nerg==} + + stream-shift@1.0.3: + resolution: {integrity: sha512-76ORR0DO1o1hlKwTbi/DM3EXWGf3ZJYO8cXX5RJwnul2DEg2oyoZyjLNoQM8WsvZiFKCRfC1O0J7iCvie3RZmQ==} + + string-width@4.2.3: + 
resolution: {integrity: sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==} + engines: {node: '>=8'} + + string_decoder@1.3.0: + resolution: {integrity: sha512-hkRX8U1WjJFd8LsDJ2yQ/wWWxaopEsABU1XfkM8A+j0+85JAGppt16cr1Whg6KIbb4okU6Mql6BOj+uup/wKeA==} + + strip-ansi@6.0.1: + resolution: {integrity: sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==} + engines: {node: '>=8'} + + strip-ansi@7.2.0: + resolution: {integrity: sha512-yDPMNjp4WyfYBkHnjIRLfca1i6KMyGCtsVgoKe/z1+6vukgaENdgGBZt+ZmKPc4gavvEZ5OgHfHdrazhgNyG7w==} + engines: {node: '>=12'} + + strip-final-newline@4.0.0: + resolution: {integrity: sha512-aulFJcD6YK8V1G7iRB5tigAP4TsHBZZrOV8pjV++zdUwmeV8uzbY7yn6h9MswN62adStNZFuCIx4haBnRuMDaw==} + engines: {node: '>=18'} + + strip-json-comments@2.0.1: + resolution: {integrity: sha512-4gB8na07fecVVkOI6Rs4e7T6NOTki5EmL7TUduTs6bu3EdnSycntVJ4re8kgZA+wx9IueI2Y11bfbgwtzuE0KQ==} + engines: {node: '>=0.10.0'} + + strip-json-comments@3.1.1: + resolution: {integrity: sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==} + engines: {node: '>=8'} + + strip-literal@3.1.0: + resolution: {integrity: sha512-8r3mkIM/2+PpjHoOtiAW8Rg3jJLHaV7xPwG+YRGrv6FP0wwk/toTpATxWYOW0BKdWwl82VT2tFYi5DlROa0Mxg==} + + stubs@3.0.0: + resolution: {integrity: sha512-PdHt7hHUJKxvTCgbKX9C1V/ftOcjJQgz8BZwNfV5c4B6dcGqlpelTbJ999jBGZ2jYiPAwcX5dP6oBwVlBlUbxw==} + + supports-preserve-symlinks-flag@1.0.0: + resolution: {integrity: sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w==} + engines: {node: '>= 0.4'} + + systeminformation@5.31.4: + resolution: {integrity: sha512-lZppDyQx91VdS5zJvAyGkmwe+Mq6xY978BDUG2wRkWE+jkmUF5ti8cvOovFQoN5bvSFKCXVkyKEaU5ec3SJiRg==} + engines: {node: '>=8.0.0'} + os: [darwin, linux, win32, freebsd, openbsd, netbsd, sunos, android] + hasBin: true + + tar-fs@2.1.4: + resolution: {integrity: 
sha512-mDAjwmZdh7LTT6pNleZ05Yt65HC3E+NiQzl672vQG38jIrehtJk/J3mNwIg+vShQPcLF/LV7CMnDW6vjj6sfYQ==} + + tar-stream@2.2.0: + resolution: {integrity: sha512-ujeqbceABgwMZxEJnk2HDY2DlnUZ+9oEcb1KzTVfYHio0UE6dG71n60d8D2I4qNvleWrrXpmjpt7vZeF1LnMZQ==} + engines: {node: '>=6'} + + teeny-request@9.0.0: + resolution: {integrity: sha512-resvxdc6Mgb7YEThw6G6bExlXKkv6+YbuzGg9xuXxSgxJF7Ozs+o8Y9+2R3sArdWdW8nOokoQb1yrpFB0pQK2g==} + engines: {node: '>=14'} + + tinybench@2.9.0: + resolution: {integrity: sha512-0+DUvqWMValLmha6lr4kD8iAMK1HzV0/aKnCtWb9v9641TnP/MFb7Pc2bxoxQjTXAErryXVgUOfv2YqNllqGeg==} + + tinyexec@0.3.2: + resolution: {integrity: sha512-KQQR9yN7R5+OSwaK0XQoj22pwHoTlgYqmUscPYoknOoWCWfj/5/ABTMRi69FrKU5ffPVh5QcFikpWJI/P1ocHA==} + + tinyglobby@0.2.15: + resolution: {integrity: sha512-j2Zq4NyQYG5XMST4cbs02Ak8iJUdxRM0XI5QyxXuZOzKOINmWurp3smXu3y5wDcJrptwpSjgXHzIQxR0omXljQ==} + engines: {node: '>=12.0.0'} + + tinypool@1.1.1: + resolution: {integrity: sha512-Zba82s87IFq9A9XmjiX5uZA/ARWDrB03OHlq+Vw1fSdt0I+4/Kutwy8BP4Y/y/aORMo61FQ0vIb5j44vSo5Pkg==} + engines: {node: ^18.0.0 || >=20.0.0} + + tinyrainbow@2.0.0: + resolution: {integrity: sha512-op4nsTR47R6p0vMUUoYl/a+ljLFVtlfaXkLQmqfLR1qHma1h/ysYk4hEXZ880bf2CYgTskvTa/e196Vd5dDQXw==} + engines: {node: '>=14.0.0'} + + tinyspy@4.0.4: + resolution: {integrity: sha512-azl+t0z7pw/z958Gy9svOTuzqIk6xq+NSheJzn5MMWtWTFywIacg2wUlzKFGtt3cthx0r2SxMK0yzJOR0IES7Q==} + engines: {node: '>=14.0.0'} + + toidentifier@1.0.1: + resolution: {integrity: sha512-o5sSPKEkg/DIQNmH43V0/uerLrpzVedkUh8tGNvaeXpfpuwjKenlSox/2O/BTlZUtEe+JG7s5YhEz608PlAHRA==} + engines: {node: '>=0.6'} + + tr46@0.0.3: + resolution: {integrity: sha512-N3WMsuqV66lT30CrXNbEjx4GEwlow3v6rr4mCcv6prnfwhS01rkgyFdjPNBYd9br7LpXV1+Emh01fHnq2Gdgrw==} + + tree-sitter-bash@0.25.1: + resolution: {integrity: sha512-7hMytuYIMoXOq24yRulgIxthE9YmggZIOHCyPTTuJcu6EU54tYD+4G39cUb28kxC6jMf/AbPfWGLQtgPTdh3xw==} + peerDependencies: + tree-sitter: ^0.25.0 + peerDependenciesMeta: + tree-sitter: + optional: true + 
+ tsscmp@1.0.6: + resolution: {integrity: sha512-LxhtAkPDTkVCMQjt2h6eBVY28KCjikZqZfMcC15YBeNjkgUpdCfBu5HoiOTDu86v6smE8yOjyEktJ8hlbANHQA==} + engines: {node: '>=0.6.x'} + + tunnel-agent@0.6.0: + resolution: {integrity: sha512-McnNiV1l8RYeY8tBgEpuodCC1mLUdbSN+CYBL7kJsJNInOP8UjDDEwdk6Mw60vdLLrr5NHKZhMAOSrR2NZuQ+w==} + + type-fest@4.41.0: + resolution: {integrity: sha512-TeTSQ6H5YHvpqVwBRcnLDCBnDOHWYu7IvGbHT6N8AOymcr9PJGjc1GTtiWZTYg0NCgYwvnYWEkVChQAr9bjfwA==} + engines: {node: '>=16'} + + type-is@2.0.1: + resolution: {integrity: sha512-OZs6gsjF4vMp32qrCbiVSkrFmXtG/AZhY3t0iAMrMBiAZyV9oALtXO8hsrHbMXF9x6L3grlFuwW2oAz7cav+Gw==} + engines: {node: '>= 0.6'} + + typescript@5.9.3: + resolution: {integrity: sha512-jl1vZzPDinLr9eUt3J/t7V6FgNEw9QjvBPdysz9KfQDD41fQrC2Y4vKQdiaUpFT4bXlb1RHhLpp8wtm6M5TgSw==} + engines: {node: '>=14.17'} + hasBin: true + + undici-types@6.21.0: + resolution: {integrity: sha512-iwDZqg0QAGrg9Rav5H4n0M64c3mkR59cJ6wQp+7C4nI0gsmExaedaYLNO44eT4AtBBwjbTiGPMlt2Md0T9H9JQ==} + + undici@7.24.4: + resolution: {integrity: sha512-BM/JzwwaRXxrLdElV2Uo6cTLEjhSb3WXboncJamZ15NgUURmvlXvxa6xkwIOILIjPNo9i8ku136ZvWV0Uly8+w==} + engines: {node: '>=20.18.1'} + + unicorn-magic@0.1.0: + resolution: {integrity: sha512-lRfVq8fE8gz6QMBuDM6a+LO3IAzTi05H6gCVaUpir2E1Rwpo4ZUog45KpNXKC/Mn3Yb9UDuHumeFTo9iV/D9FQ==} + engines: {node: '>=18'} + + unicorn-magic@0.3.0: + resolution: {integrity: sha512-+QBBXBCvifc56fsbuxZQ6Sic3wqqc3WWaqxs58gvJrcOuN83HGTCwz3oS5phzU9LthRNE9VrJCFCLUgHeeFnfA==} + engines: {node: '>=18'} + + universalify@2.0.1: + resolution: {integrity: sha512-gptHNQghINnc/vTGIk0SOFGFNXw7JVrlRUtConJRlvaw6DuX0wO5Jeko9sWrMBhh+PsYAZ7oXAiOnf/UKogyiw==} + engines: {node: '>= 10.0.0'} + + unpipe@1.0.0: + resolution: {integrity: sha512-pjy2bYhSsufwWlKwPc+l3cN7+wuJlK6uz0YdJEOlQDbl6jo/YlPi4mb8agUkVC8BF7V8NuzeyPNqRksA3hztKQ==} + engines: {node: '>= 0.8'} + + url-template@2.0.8: + resolution: {integrity: 
sha512-XdVKMF4SJ0nP/O7XIPB0JwAEuT9lDIYnNsK8yGVe43y0AWoKeJNdv3ZNWh7ksJ6KqQFjOO6ox/VEitLnaVNufw==} + + util-deprecate@1.0.2: + resolution: {integrity: sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==} + + uuid@11.1.0: + resolution: {integrity: sha512-0/A9rDy9P7cJ+8w1c9WD9V//9Wj15Ce2MPz8Ri6032usz+NfePxx5AcN3bN+r6ZL6jEo066/yNYB3tn4pQEx+A==} + hasBin: true + + uuid@13.0.0: + resolution: {integrity: sha512-XQegIaBTVUjSHliKqcnFqYypAd4S+WCYt5NIeRs6w/UAry7z8Y9j5ZwRRL4kzq9U3sD6v+85er9FvkEaBpji2w==} + hasBin: true + + uuid@8.3.2: + resolution: {integrity: sha512-+NYs2QeMWy+GWFOEm9xnn6HCDp0l7QBD7ml8zLUmJ+93Q5NF0NocErnwkTkXVFNiX3/fpC6afS8Dhb/gz7R7eg==} + hasBin: true + + uuid@9.0.1: + resolution: {integrity: sha512-b+1eJOlsR9K8HJpow9Ok3fiWOWSIcIzXodvv0rQjVoOVNpWMpxf1wZNpt4y9h10odCNrqnYp1OBzRktckBe3sA==} + hasBin: true + + validate-npm-package-license@3.0.4: + resolution: {integrity: sha512-DpKm2Ui/xN7/HQKCtpZxoRWBhZ9Z0kqtygG8XCgNQ8ZlDnxuQmWhj566j8fN4Cu3/JmbhsDo7fcAJq4s9h27Ew==} + + vary@1.1.2: + resolution: {integrity: sha512-BNGbWLfd0eUPabhkXUVm0j8uuvREyTh5ovRa/dyow/BqAbZJyC+5fU+IzQOzmAKzYqYRAISoRhdQr3eIZ/PXqg==} + engines: {node: '>= 0.8'} + + vite-node@3.2.4: + resolution: {integrity: sha512-EbKSKh+bh1E1IFxeO0pg1n4dvoOTt0UDiXMd/qn++r98+jPO1xtJilvXldeuQ8giIB5IkpjCgMleHMNEsGH6pg==} + engines: {node: ^18.0.0 || ^20.0.0 || >=22.0.0} + hasBin: true + + vite@7.3.1: + resolution: {integrity: sha512-w+N7Hifpc3gRjZ63vYBXA56dvvRlNWRczTdmCBBa+CotUzAPf5b7YMdMR/8CQoeYE5LX3W4wj6RYTgonm1b9DA==} + engines: {node: ^20.19.0 || >=22.12.0} + hasBin: true + peerDependencies: + '@types/node': ^20.19.0 || >=22.12.0 + jiti: '>=1.21.0' + less: ^4.0.0 + lightningcss: ^1.21.0 + sass: ^1.70.0 + sass-embedded: ^1.70.0 + stylus: '>=0.54.8' + sugarss: ^5.0.0 + terser: ^5.16.0 + tsx: ^4.8.1 + yaml: ^2.4.2 + peerDependenciesMeta: + '@types/node': + optional: true + jiti: + optional: true + less: + optional: true + lightningcss: + optional: true + sass: + 
optional: true + sass-embedded: + optional: true + stylus: + optional: true + sugarss: + optional: true + terser: + optional: true + tsx: + optional: true + yaml: + optional: true + + vitest@3.2.4: + resolution: {integrity: sha512-LUCP5ev3GURDysTWiP47wRRUpLKMOfPh+yKTx3kVIEiu5KOMeqzpnYNsKyOoVrULivR8tLcks4+lga33Whn90A==} + engines: {node: ^18.0.0 || ^20.0.0 || >=22.0.0} + hasBin: true + peerDependencies: + '@edge-runtime/vm': '*' + '@types/debug': ^4.1.12 + '@types/node': ^18.0.0 || ^20.0.0 || >=22.0.0 + '@vitest/browser': 3.2.4 + '@vitest/ui': 3.2.4 + happy-dom: '*' + jsdom: '*' + peerDependenciesMeta: + '@edge-runtime/vm': + optional: true + '@types/debug': + optional: true + '@types/node': + optional: true + '@vitest/browser': + optional: true + '@vitest/ui': + optional: true + happy-dom: + optional: true + jsdom: + optional: true + + web-streams-polyfill@3.3.3: + resolution: {integrity: sha512-d2JWLCivmZYTSIoge9MsgFCZrt571BikcWGYkjC1khllbTeDlGqZ2D8vD8E/lJa8WGWbb7Plm8/XJYV7IJHZZw==} + engines: {node: '>= 8'} + + web-tree-sitter@0.25.10: + resolution: {integrity: sha512-Y09sF44/13XvgVKgO2cNDw5rGk6s26MgoZPXLESvMXeefBf7i6/73eFurre0IsTW6E14Y0ArIzhUMmjoc7xyzA==} + peerDependencies: + '@types/emscripten': ^1.40.0 + peerDependenciesMeta: + '@types/emscripten': + optional: true + + webidl-conversions@3.0.1: + resolution: {integrity: sha512-2JAn3z8AR6rjK8Sm8orRC0h/bcl/DqL7tRPdGZ4I1CjdF+EaMLmYxBHyXuKL849eucPFhvBoxMsflfOb8kxaeQ==} + + whatwg-url@5.0.0: + resolution: {integrity: sha512-saE57nupxk6v3HY35+jzBwYa0rKSy0XR8JSxZPwgLr7ys0IBzhGviA1/TUGJLmSVqs8pb9AnvICXEuOHLprYTw==} + + which@2.0.2: + resolution: {integrity: sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==} + engines: {node: '>= 8'} + hasBin: true + + why-is-node-running@2.3.0: + resolution: {integrity: sha512-hUrmaWBdVDcxvYqnyh09zunKzROWjbZTiNy8dBEjkS7ehEDQibXJ7XvlmtbwuTclUiIyN+CyXQD4Vmko8fNm8w==} + engines: {node: '>=8'} + hasBin: true + + wrap-ansi@7.0.0: + resolution: 
{integrity: sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==} + engines: {node: '>=10'} + + wrappy@1.0.2: + resolution: {integrity: sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==} + + ws@8.19.0: + resolution: {integrity: sha512-blAT2mjOEIi0ZzruJfIhb3nps74PRWTCz1IjglWEEpQl5XS/UNama6u2/rjFkDDouqr4L67ry+1aGIALViWjDg==} + engines: {node: '>=10.0.0'} + peerDependencies: + bufferutil: ^4.0.1 + utf-8-validate: '>=5.0.2' + peerDependenciesMeta: + bufferutil: + optional: true + utf-8-validate: + optional: true + + wsl-utils@0.1.0: + resolution: {integrity: sha512-h3Fbisa2nKGPxCpm89Hk33lBLsnaGBvctQopaBSOW/uIs6FTe1ATyAnKFJrzVs9vpGdsTe73WF3V4lIsk4Gacw==} + engines: {node: '>=18'} + + xdg-basedir@5.1.0: + resolution: {integrity: sha512-GCPAHLvrIH13+c0SuacwvRYj2SxJXQ4kaVTT5xgL3kPrz56XxkF21IGhjSE1+W0aw7gpBWRGXLCPnPby6lSpmQ==} + engines: {node: '>=12'} + + y18n@5.0.8: + resolution: {integrity: sha512-0pfFzegeDWJHJIAmTLRP2DwHjdF5s7jo9tuztdQxAhINCdvS+3nGINqPd00AphqJR/0LhANUS6/+7SCb98YOfA==} + engines: {node: '>=10'} + + yaml@2.8.2: + resolution: {integrity: sha512-mplynKqc1C2hTVYxd0PU2xQAc22TI1vShAYGksCCfxbn/dFwnHTNi1bvYsBTkhdUNtGIf5xNOg938rrSSYvS9A==} + engines: {node: '>= 14.6'} + hasBin: true + + yargs-parser@21.1.1: + resolution: {integrity: sha512-tVpsJW7DdjecAiFpbIB1e3qxIQsE6NoPc5/eTdrbbIC4h0LVsWhnoa3g+m2HclBIujHzsxZ4VJVA+GUuc2/LBw==} + engines: {node: '>=12'} + + yargs@17.7.2: + resolution: {integrity: sha512-7dSzzRQ++CKnNI/krKnYRV7JKKPUXMEh61soaHKg9mrWEhzFWhFnxPxGl+69cD1Ou63C13NUPCnmIcrvqCuM6w==} + engines: {node: '>=12'} + + yauzl@2.10.0: + resolution: {integrity: sha512-p4a9I6X6nu6IhoGmBqAcbJy1mlC4j27vEPZX9F4L4/vZT3Lyq1VkFHw/V/PUcB9Buo+DG3iHkT0x3Qya58zc3g==} + + yoctocolors@2.1.2: + resolution: {integrity: sha512-CzhO+pFNo8ajLM2d2IW/R93ipy99LWjtwblvC1RsoSUMZgyLbYFr221TnSNT7GjGdYui6P459mw9JH/g/zW2ug==} + engines: {node: '>=18'} + + zod-to-json-schema@3.25.0: + resolution: 
{integrity: sha512-HvWtU2UG41LALjajJrML6uQejQhNJx+JBO9IflpSja4R03iNWfKXrj6W2h7ljuLyc1nKS+9yDyL/9tD1U/yBnQ==} + peerDependencies: + zod: ^3.25 || ^4 + + zod-to-json-schema@3.25.1: + resolution: {integrity: sha512-pM/SU9d3YAggzi6MtR4h7ruuQlqKtad8e9S0fmxcMi+ueAK5Korys/aWcV9LIIHTVbj01NdzxcnXSN+O74ZIVA==} + peerDependencies: + zod: ^3.25 || ^4 + + zod@3.25.76: + resolution: {integrity: sha512-gzUt/qt81nXsFGKIFcC3YnfEAx5NkunCfnDlvuBSSFS02bcXu4Lmea0AFIUwbLWxWPx3d9p8S5QoaujKcNQxcQ==} + + zod@4.3.6: + resolution: {integrity: sha512-rftlrkhHZOcjDwkGlnUtZZkvaPHCsDATp4pGpuOOMDaTdDDXF91wuVDJoWoPsKX/3YPQ5fHuF3STjcYyKr+Qhg==} + +snapshots: + + '@a2a-js/sdk@0.3.13(@grpc/grpc-js@1.14.3)(express@5.2.1)': + dependencies: + uuid: 11.1.0 + optionalDependencies: + '@grpc/grpc-js': 1.14.3 + express: 5.2.1 + + '@ai-sdk/gateway@3.0.66(zod@4.3.6)': + dependencies: + '@ai-sdk/provider': 3.0.8 + '@ai-sdk/provider-utils': 4.0.19(zod@4.3.6) + '@vercel/oidc': 3.1.0 + zod: 4.3.6 + + '@ai-sdk/provider-utils@4.0.19(zod@4.3.6)': + dependencies: + '@ai-sdk/provider': 3.0.8 + '@standard-schema/spec': 1.1.0 + eventsource-parser: 3.0.6 + zod: 4.3.6 + + '@ai-sdk/provider@3.0.8': + dependencies: + json-schema: 0.4.0 + + '@anthropic-ai/claude-agent-sdk@0.2.76(zod@4.3.6)': + dependencies: + zod: 4.3.6 + optionalDependencies: + '@img/sharp-darwin-arm64': 0.34.5 + '@img/sharp-darwin-x64': 0.34.5 + '@img/sharp-linux-arm': 0.34.5 + '@img/sharp-linux-arm64': 0.34.5 + '@img/sharp-linux-x64': 0.34.5 + '@img/sharp-linuxmusl-arm64': 0.34.5 + '@img/sharp-linuxmusl-x64': 0.34.5 + '@img/sharp-win32-arm64': 0.34.5 + '@img/sharp-win32-x64': 0.34.5 + + '@babel/code-frame@7.29.0': + dependencies: + '@babel/helper-validator-identifier': 7.28.5 + js-tokens: 4.0.0 + picocolors: 1.1.1 + + '@babel/helper-validator-identifier@7.28.5': {} + + '@biomejs/biome@1.9.4': + optionalDependencies: + '@biomejs/cli-darwin-arm64': 1.9.4 + '@biomejs/cli-darwin-x64': 1.9.4 + '@biomejs/cli-linux-arm64': 1.9.4 + '@biomejs/cli-linux-arm64-musl': 
1.9.4 + '@biomejs/cli-linux-x64': 1.9.4 + '@biomejs/cli-linux-x64-musl': 1.9.4 + '@biomejs/cli-win32-arm64': 1.9.4 + '@biomejs/cli-win32-x64': 1.9.4 + + '@biomejs/cli-darwin-arm64@1.9.4': + optional: true + + '@biomejs/cli-darwin-x64@1.9.4': + optional: true + + '@biomejs/cli-linux-arm64-musl@1.9.4': + optional: true + + '@biomejs/cli-linux-arm64@1.9.4': + optional: true + + '@biomejs/cli-linux-x64-musl@1.9.4': + optional: true + + '@biomejs/cli-linux-x64@1.9.4': + optional: true + + '@biomejs/cli-win32-arm64@1.9.4': + optional: true + + '@biomejs/cli-win32-x64@1.9.4': + optional: true + + '@esbuild/aix-ppc64@0.27.3': + optional: true + + '@esbuild/android-arm64@0.27.3': + optional: true + + '@esbuild/android-arm@0.27.3': + optional: true + + '@esbuild/android-x64@0.27.3': + optional: true + + '@esbuild/darwin-arm64@0.27.3': + optional: true + + '@esbuild/darwin-x64@0.27.3': + optional: true + + '@esbuild/freebsd-arm64@0.27.3': + optional: true + + '@esbuild/freebsd-x64@0.27.3': + optional: true + + '@esbuild/linux-arm64@0.27.3': + optional: true + + '@esbuild/linux-arm@0.27.3': + optional: true + + '@esbuild/linux-ia32@0.27.3': + optional: true + + '@esbuild/linux-loong64@0.27.3': + optional: true + + '@esbuild/linux-mips64el@0.27.3': + optional: true + + '@esbuild/linux-ppc64@0.27.3': + optional: true + + '@esbuild/linux-riscv64@0.27.3': + optional: true + + '@esbuild/linux-s390x@0.27.3': + optional: true + + '@esbuild/linux-x64@0.27.3': + optional: true + + '@esbuild/netbsd-arm64@0.27.3': + optional: true + + '@esbuild/netbsd-x64@0.27.3': + optional: true + + '@esbuild/openbsd-arm64@0.27.3': + optional: true + + '@esbuild/openbsd-x64@0.27.3': + optional: true + + '@esbuild/openharmony-arm64@0.27.3': + optional: true + + '@esbuild/sunos-x64@0.27.3': + optional: true + + '@esbuild/win32-arm64@0.27.3': + optional: true + + '@esbuild/win32-ia32@0.27.3': + optional: true + + '@esbuild/win32-x64@0.27.3': + optional: true + + '@google-cloud/common@5.0.2': + 
dependencies: + '@google-cloud/projectify': 4.0.0 + '@google-cloud/promisify': 4.0.0 + arrify: 2.0.1 + duplexify: 4.1.3 + extend: 3.0.2 + google-auth-library: 9.15.1 + html-entities: 2.6.0 + retry-request: 7.0.2 + teeny-request: 9.0.0 + transitivePeerDependencies: + - encoding + - supports-color + + '@google-cloud/logging@11.2.1': + dependencies: + '@google-cloud/common': 5.0.2 + '@google-cloud/paginator': 5.0.2 + '@google-cloud/projectify': 4.0.0 + '@google-cloud/promisify': 4.0.0 + '@opentelemetry/api': 1.9.0 + arrify: 2.0.1 + dot-prop: 6.0.1 + eventid: 2.0.1 + extend: 3.0.2 + gcp-metadata: 6.1.1 + google-auth-library: 9.15.1 + google-gax: 4.6.1 + on-finished: 2.4.1 + pumpify: 2.0.1 + stream-events: 1.0.5 + uuid: 9.0.1 + transitivePeerDependencies: + - encoding + - supports-color + + '@google-cloud/opentelemetry-cloud-monitoring-exporter@0.21.0(@opentelemetry/api@1.9.0)(@opentelemetry/core@2.6.0(@opentelemetry/api@1.9.0))(@opentelemetry/resources@2.6.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-metrics@2.6.0(@opentelemetry/api@1.9.0))': + dependencies: + '@google-cloud/opentelemetry-resource-util': 3.0.0(@opentelemetry/core@2.6.0(@opentelemetry/api@1.9.0))(@opentelemetry/resources@2.6.0(@opentelemetry/api@1.9.0)) + '@google-cloud/precise-date': 4.0.0 + '@opentelemetry/api': 1.9.0 + '@opentelemetry/core': 2.6.0(@opentelemetry/api@1.9.0) + '@opentelemetry/resources': 2.6.0(@opentelemetry/api@1.9.0) + '@opentelemetry/sdk-metrics': 2.6.0(@opentelemetry/api@1.9.0) + google-auth-library: 9.15.1 + googleapis: 137.1.0 + transitivePeerDependencies: + - encoding + - supports-color + + '@google-cloud/opentelemetry-cloud-trace-exporter@3.0.0(@opentelemetry/api@1.9.0)(@opentelemetry/core@2.6.0(@opentelemetry/api@1.9.0))(@opentelemetry/resources@2.6.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.6.0(@opentelemetry/api@1.9.0))': + dependencies: + '@google-cloud/opentelemetry-resource-util': 
3.0.0(@opentelemetry/core@2.6.0(@opentelemetry/api@1.9.0))(@opentelemetry/resources@2.6.0(@opentelemetry/api@1.9.0)) + '@grpc/grpc-js': 1.14.3 + '@grpc/proto-loader': 0.8.0 + '@opentelemetry/api': 1.9.0 + '@opentelemetry/core': 2.6.0(@opentelemetry/api@1.9.0) + '@opentelemetry/resources': 2.6.0(@opentelemetry/api@1.9.0) + '@opentelemetry/sdk-trace-base': 2.6.0(@opentelemetry/api@1.9.0) + google-auth-library: 9.15.1 + transitivePeerDependencies: + - encoding + - supports-color + + '@google-cloud/opentelemetry-resource-util@3.0.0(@opentelemetry/core@2.6.0(@opentelemetry/api@1.9.0))(@opentelemetry/resources@2.6.0(@opentelemetry/api@1.9.0))': + dependencies: + '@opentelemetry/core': 2.6.0(@opentelemetry/api@1.9.0) + '@opentelemetry/resources': 2.6.0(@opentelemetry/api@1.9.0) + '@opentelemetry/semantic-conventions': 1.40.0 + gcp-metadata: 6.1.1 + transitivePeerDependencies: + - encoding + - supports-color + + '@google-cloud/paginator@5.0.2': + dependencies: + arrify: 2.0.1 + extend: 3.0.2 + + '@google-cloud/precise-date@4.0.0': {} + + '@google-cloud/projectify@4.0.0': {} + + '@google-cloud/promisify@4.0.0': {} + + '@google/gemini-cli-core@0.22.4(@opentelemetry/core@2.6.0(@opentelemetry/api@1.9.0))(@opentelemetry/resources@2.6.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-metrics@2.6.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.6.0(@opentelemetry/api@1.9.0))': + dependencies: + '@google-cloud/logging': 11.2.1 + '@google-cloud/opentelemetry-cloud-monitoring-exporter': 0.21.0(@opentelemetry/api@1.9.0)(@opentelemetry/core@2.6.0(@opentelemetry/api@1.9.0))(@opentelemetry/resources@2.6.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-metrics@2.6.0(@opentelemetry/api@1.9.0)) + '@google-cloud/opentelemetry-cloud-trace-exporter': 3.0.0(@opentelemetry/api@1.9.0)(@opentelemetry/core@2.6.0(@opentelemetry/api@1.9.0))(@opentelemetry/resources@2.6.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.6.0(@opentelemetry/api@1.9.0)) + '@google/genai': 
1.30.0(@modelcontextprotocol/sdk@1.27.1(zod@4.3.6)) + '@iarna/toml': 2.2.5 + '@joshua.litt/get-ripgrep': 0.0.3 + '@modelcontextprotocol/sdk': 1.27.1(zod@3.25.76) + '@opentelemetry/api': 1.9.0 + '@opentelemetry/exporter-logs-otlp-grpc': 0.203.0(@opentelemetry/api@1.9.0) + '@opentelemetry/exporter-logs-otlp-http': 0.203.0(@opentelemetry/api@1.9.0) + '@opentelemetry/exporter-metrics-otlp-grpc': 0.203.0(@opentelemetry/api@1.9.0) + '@opentelemetry/exporter-metrics-otlp-http': 0.203.0(@opentelemetry/api@1.9.0) + '@opentelemetry/exporter-trace-otlp-grpc': 0.203.0(@opentelemetry/api@1.9.0) + '@opentelemetry/exporter-trace-otlp-http': 0.203.0(@opentelemetry/api@1.9.0) + '@opentelemetry/instrumentation-http': 0.203.0(@opentelemetry/api@1.9.0) + '@opentelemetry/resource-detector-gcp': 0.40.3(@opentelemetry/api@1.9.0) + '@opentelemetry/sdk-node': 0.203.0(@opentelemetry/api@1.9.0) + '@types/glob': 8.1.0 + '@types/html-to-text': 9.0.4 + '@xterm/headless': 5.5.0 + ajv: 8.18.0 + ajv-formats: 3.0.1(ajv@8.18.0) + chardet: 2.1.1 + diff: 7.0.0 + dotenv: 17.3.1 + fast-levenshtein: 2.0.6 + fast-uri: 3.1.0 + fdir: 6.5.0(picomatch@4.0.3) + fzf: 0.5.2 + glob: 12.0.0 + google-auth-library: 9.15.1 + html-to-text: 9.0.5 + https-proxy-agent: 7.0.6 + ignore: 7.0.5 + marked: 15.0.12 + mime: 4.0.7 + mnemonist: 0.40.3 + open: 10.2.0 + picomatch: 4.0.3 + read-package-up: 11.0.0 + shell-quote: 1.8.3 + simple-git: 3.33.0 + strip-ansi: 7.2.0 + tree-sitter-bash: 0.25.1 + undici: 7.24.4 + web-tree-sitter: 0.25.10 + ws: 8.19.0 + zod: 3.25.76 + optionalDependencies: + '@lydell/node-pty': 1.1.0 + '@lydell/node-pty-darwin-arm64': 1.1.0 + '@lydell/node-pty-darwin-x64': 1.1.0 + '@lydell/node-pty-linux-x64': 1.1.0 + '@lydell/node-pty-win32-arm64': 1.1.0 + '@lydell/node-pty-win32-x64': 1.1.0 + node-pty: 1.1.0 + transitivePeerDependencies: + - '@cfworker/json-schema' + - '@opentelemetry/core' + - '@opentelemetry/resources' + - '@opentelemetry/sdk-metrics' + - '@opentelemetry/sdk-trace-base' + - 
'@types/emscripten' + - bufferutil + - encoding + - supports-color + - tree-sitter + - utf-8-validate + + '@google/gemini-cli-core@0.33.2(express@5.2.1)': + dependencies: + '@a2a-js/sdk': 0.3.13(@grpc/grpc-js@1.14.3)(express@5.2.1) + '@google-cloud/logging': 11.2.1 + '@google-cloud/opentelemetry-cloud-monitoring-exporter': 0.21.0(@opentelemetry/api@1.9.0)(@opentelemetry/core@2.6.0(@opentelemetry/api@1.9.0))(@opentelemetry/resources@2.6.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-metrics@2.6.0(@opentelemetry/api@1.9.0)) + '@google-cloud/opentelemetry-cloud-trace-exporter': 3.0.0(@opentelemetry/api@1.9.0)(@opentelemetry/core@2.6.0(@opentelemetry/api@1.9.0))(@opentelemetry/resources@2.6.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.6.0(@opentelemetry/api@1.9.0)) + '@google/genai': 1.30.0(@modelcontextprotocol/sdk@1.27.1(zod@4.3.6)) + '@grpc/grpc-js': 1.14.3 + '@iarna/toml': 2.2.5 + '@joshua.litt/get-ripgrep': 0.0.3 + '@modelcontextprotocol/sdk': 1.27.1(zod@3.25.76) + '@opentelemetry/api': 1.9.0 + '@opentelemetry/api-logs': 0.211.0 + '@opentelemetry/core': 2.6.0(@opentelemetry/api@1.9.0) + '@opentelemetry/exporter-logs-otlp-grpc': 0.211.0(@opentelemetry/api@1.9.0) + '@opentelemetry/exporter-logs-otlp-http': 0.211.0(@opentelemetry/api@1.9.0) + '@opentelemetry/exporter-metrics-otlp-grpc': 0.211.0(@opentelemetry/api@1.9.0) + '@opentelemetry/exporter-metrics-otlp-http': 0.211.0(@opentelemetry/api@1.9.0) + '@opentelemetry/exporter-trace-otlp-grpc': 0.211.0(@opentelemetry/api@1.9.0) + '@opentelemetry/exporter-trace-otlp-http': 0.211.0(@opentelemetry/api@1.9.0) + '@opentelemetry/instrumentation-http': 0.211.0(@opentelemetry/api@1.9.0) + '@opentelemetry/otlp-exporter-base': 0.211.0(@opentelemetry/api@1.9.0) + '@opentelemetry/resources': 2.6.0(@opentelemetry/api@1.9.0) + '@opentelemetry/sdk-logs': 0.211.0(@opentelemetry/api@1.9.0) + '@opentelemetry/sdk-metrics': 2.6.0(@opentelemetry/api@1.9.0) + '@opentelemetry/sdk-node': 0.211.0(@opentelemetry/api@1.9.0) + 
'@opentelemetry/sdk-trace-base': 2.6.0(@opentelemetry/api@1.9.0) + '@opentelemetry/sdk-trace-node': 2.6.0(@opentelemetry/api@1.9.0) + '@opentelemetry/semantic-conventions': 1.40.0 + '@types/html-to-text': 9.0.4 + '@xterm/headless': 5.5.0 + ajv: 8.18.0 + ajv-formats: 3.0.1(ajv@8.18.0) + chardet: 2.1.1 + diff: 8.0.3 + dotenv: 17.3.1 + dotenv-expand: 12.0.3 + fast-levenshtein: 2.0.6 + fdir: 6.5.0(picomatch@4.0.3) + fzf: 0.5.2 + glob: 12.0.0 + google-auth-library: 9.15.1 + html-to-text: 9.0.5 + https-proxy-agent: 7.0.6 + ignore: 7.0.5 + js-yaml: 4.1.1 + marked: 15.0.12 + mime: 4.0.7 + mnemonist: 0.40.3 + open: 10.2.0 + picomatch: 4.0.3 + proper-lockfile: 4.1.2 + read-package-up: 11.0.0 + shell-quote: 1.8.3 + simple-git: 3.33.0 + strip-ansi: 7.2.0 + strip-json-comments: 3.1.1 + systeminformation: 5.31.4 + tree-sitter-bash: 0.25.1 + undici: 7.24.4 + uuid: 13.0.0 + web-tree-sitter: 0.25.10 + zod: 3.25.76 + zod-to-json-schema: 3.25.1(zod@3.25.76) + optionalDependencies: + '@lydell/node-pty': 1.1.0 + '@lydell/node-pty-darwin-arm64': 1.1.0 + '@lydell/node-pty-darwin-x64': 1.1.0 + '@lydell/node-pty-linux-x64': 1.1.0 + '@lydell/node-pty-win32-arm64': 1.1.0 + '@lydell/node-pty-win32-x64': 1.1.0 + keytar: 7.9.0 + node-pty: 1.1.0 + transitivePeerDependencies: + - '@bufbuild/protobuf' + - '@cfworker/json-schema' + - '@types/emscripten' + - bufferutil + - encoding + - express + - supports-color + - tree-sitter + - utf-8-validate + + '@google/genai@1.30.0(@modelcontextprotocol/sdk@1.27.1(zod@4.3.6))': + dependencies: + google-auth-library: 10.6.2 + ws: 8.19.0 + optionalDependencies: + '@modelcontextprotocol/sdk': 1.27.1(zod@3.25.76) + transitivePeerDependencies: + - bufferutil + - supports-color + - utf-8-validate + + '@google/genai@1.45.0(@modelcontextprotocol/sdk@1.27.1(zod@4.3.6))': + dependencies: + google-auth-library: 10.6.2 + p-retry: 4.6.2 + protobufjs: 7.5.4 + ws: 8.19.0 + optionalDependencies: + '@modelcontextprotocol/sdk': 1.27.1(zod@3.25.76) + transitivePeerDependencies: 
+ - bufferutil + - supports-color + - utf-8-validate + + '@grpc/grpc-js@1.14.3': + dependencies: + '@grpc/proto-loader': 0.8.0 + '@js-sdsl/ordered-map': 4.4.2 + + '@grpc/proto-loader@0.7.15': + dependencies: + lodash.camelcase: 4.3.0 + long: 5.3.2 + protobufjs: 7.5.4 + yargs: 17.7.2 + + '@grpc/proto-loader@0.8.0': + dependencies: + lodash.camelcase: 4.3.0 + long: 5.3.2 + protobufjs: 7.5.4 + yargs: 17.7.2 + + '@hono/node-server@1.19.11(hono@4.12.8)': + dependencies: + hono: 4.12.8 + + '@iarna/toml@2.2.5': {} + + '@img/sharp-darwin-arm64@0.34.5': + optionalDependencies: + '@img/sharp-libvips-darwin-arm64': 1.2.4 + optional: true + + '@img/sharp-darwin-x64@0.34.5': + optionalDependencies: + '@img/sharp-libvips-darwin-x64': 1.2.4 + optional: true + + '@img/sharp-libvips-darwin-arm64@1.2.4': + optional: true + + '@img/sharp-libvips-darwin-x64@1.2.4': + optional: true + + '@img/sharp-libvips-linux-arm64@1.2.4': + optional: true + + '@img/sharp-libvips-linux-arm@1.2.4': + optional: true + + '@img/sharp-libvips-linux-x64@1.2.4': + optional: true + + '@img/sharp-libvips-linuxmusl-arm64@1.2.4': + optional: true + + '@img/sharp-libvips-linuxmusl-x64@1.2.4': + optional: true + + '@img/sharp-linux-arm64@0.34.5': + optionalDependencies: + '@img/sharp-libvips-linux-arm64': 1.2.4 + optional: true + + '@img/sharp-linux-arm@0.34.5': + optionalDependencies: + '@img/sharp-libvips-linux-arm': 1.2.4 + optional: true + + '@img/sharp-linux-x64@0.34.5': + optionalDependencies: + '@img/sharp-libvips-linux-x64': 1.2.4 + optional: true + + '@img/sharp-linuxmusl-arm64@0.34.5': + optionalDependencies: + '@img/sharp-libvips-linuxmusl-arm64': 1.2.4 + optional: true + + '@img/sharp-linuxmusl-x64@0.34.5': + optionalDependencies: + '@img/sharp-libvips-linuxmusl-x64': 1.2.4 + optional: true + + '@img/sharp-win32-arm64@0.34.5': + optional: true + + '@img/sharp-win32-x64@0.34.5': + optional: true + + '@isaacs/cliui@9.0.0': {} + + '@joshua.litt/get-ripgrep@0.0.3': + dependencies: + 
'@lvce-editor/verror': 1.7.0 + execa: 9.6.1 + extract-zip: 2.0.1 + fs-extra: 11.3.4 + got: 14.6.6 + path-exists: 5.0.0 + xdg-basedir: 5.1.0 + transitivePeerDependencies: + - supports-color + + '@jridgewell/sourcemap-codec@1.5.5': {} + + '@js-sdsl/ordered-map@4.4.2': {} + + '@keyv/serialize@1.1.1': {} + + '@kwsites/file-exists@1.1.1': + dependencies: + debug: 4.4.3 + transitivePeerDependencies: + - supports-color + + '@kwsites/promise-deferred@1.1.1': {} + + '@lvce-editor/verror@1.7.0': {} + + '@lydell/node-pty-darwin-arm64@1.1.0': + optional: true + + '@lydell/node-pty-darwin-x64@1.1.0': + optional: true + + '@lydell/node-pty-linux-arm64@1.1.0': + optional: true + + '@lydell/node-pty-linux-x64@1.1.0': + optional: true + + '@lydell/node-pty-win32-arm64@1.1.0': + optional: true + + '@lydell/node-pty-win32-x64@1.1.0': + optional: true + + '@lydell/node-pty@1.1.0': + optionalDependencies: + '@lydell/node-pty-darwin-arm64': 1.1.0 + '@lydell/node-pty-darwin-x64': 1.1.0 + '@lydell/node-pty-linux-arm64': 1.1.0 + '@lydell/node-pty-linux-x64': 1.1.0 + '@lydell/node-pty-win32-arm64': 1.1.0 + '@lydell/node-pty-win32-x64': 1.1.0 + optional: true + + '@modelcontextprotocol/sdk@1.27.1(zod@3.25.76)': + dependencies: + '@hono/node-server': 1.19.11(hono@4.12.8) + ajv: 8.18.0 + ajv-formats: 3.0.1(ajv@8.18.0) + content-type: 1.0.5 + cors: 2.8.6 + cross-spawn: 7.0.6 + eventsource: 3.0.7 + eventsource-parser: 3.0.6 + express: 5.2.1 + express-rate-limit: 8.3.1(express@5.2.1) + hono: 4.12.8 + jose: 6.2.1 + json-schema-typed: 8.0.2 + pkce-challenge: 5.0.1 + raw-body: 3.0.2 + zod: 3.25.76 + zod-to-json-schema: 3.25.1(zod@3.25.76) + transitivePeerDependencies: + - supports-color + + '@opentelemetry/api-logs@0.203.0': + dependencies: + '@opentelemetry/api': 1.9.0 + + '@opentelemetry/api-logs@0.211.0': + dependencies: + '@opentelemetry/api': 1.9.0 + + '@opentelemetry/api@1.9.0': {} + + '@opentelemetry/configuration@0.211.0(@opentelemetry/api@1.9.0)': + dependencies: + '@opentelemetry/api': 
1.9.0 + '@opentelemetry/core': 2.5.0(@opentelemetry/api@1.9.0) + yaml: 2.8.2 + + '@opentelemetry/context-async-hooks@2.0.1(@opentelemetry/api@1.9.0)': + dependencies: + '@opentelemetry/api': 1.9.0 + + '@opentelemetry/context-async-hooks@2.5.0(@opentelemetry/api@1.9.0)': + dependencies: + '@opentelemetry/api': 1.9.0 + + '@opentelemetry/context-async-hooks@2.6.0(@opentelemetry/api@1.9.0)': + dependencies: + '@opentelemetry/api': 1.9.0 + + '@opentelemetry/core@2.0.1(@opentelemetry/api@1.9.0)': + dependencies: + '@opentelemetry/api': 1.9.0 + '@opentelemetry/semantic-conventions': 1.40.0 + + '@opentelemetry/core@2.5.0(@opentelemetry/api@1.9.0)': + dependencies: + '@opentelemetry/api': 1.9.0 + '@opentelemetry/semantic-conventions': 1.40.0 + + '@opentelemetry/core@2.6.0(@opentelemetry/api@1.9.0)': + dependencies: + '@opentelemetry/api': 1.9.0 + '@opentelemetry/semantic-conventions': 1.40.0 + + '@opentelemetry/exporter-logs-otlp-grpc@0.203.0(@opentelemetry/api@1.9.0)': + dependencies: + '@grpc/grpc-js': 1.14.3 + '@opentelemetry/api': 1.9.0 + '@opentelemetry/core': 2.0.1(@opentelemetry/api@1.9.0) + '@opentelemetry/otlp-exporter-base': 0.203.0(@opentelemetry/api@1.9.0) + '@opentelemetry/otlp-grpc-exporter-base': 0.203.0(@opentelemetry/api@1.9.0) + '@opentelemetry/otlp-transformer': 0.203.0(@opentelemetry/api@1.9.0) + '@opentelemetry/sdk-logs': 0.203.0(@opentelemetry/api@1.9.0) + + '@opentelemetry/exporter-logs-otlp-grpc@0.211.0(@opentelemetry/api@1.9.0)': + dependencies: + '@grpc/grpc-js': 1.14.3 + '@opentelemetry/api': 1.9.0 + '@opentelemetry/core': 2.5.0(@opentelemetry/api@1.9.0) + '@opentelemetry/otlp-exporter-base': 0.211.0(@opentelemetry/api@1.9.0) + '@opentelemetry/otlp-grpc-exporter-base': 0.211.0(@opentelemetry/api@1.9.0) + '@opentelemetry/otlp-transformer': 0.211.0(@opentelemetry/api@1.9.0) + '@opentelemetry/sdk-logs': 0.211.0(@opentelemetry/api@1.9.0) + + '@opentelemetry/exporter-logs-otlp-http@0.203.0(@opentelemetry/api@1.9.0)': + dependencies: + 
'@opentelemetry/api': 1.9.0 + '@opentelemetry/api-logs': 0.203.0 + '@opentelemetry/core': 2.0.1(@opentelemetry/api@1.9.0) + '@opentelemetry/otlp-exporter-base': 0.203.0(@opentelemetry/api@1.9.0) + '@opentelemetry/otlp-transformer': 0.203.0(@opentelemetry/api@1.9.0) + '@opentelemetry/sdk-logs': 0.203.0(@opentelemetry/api@1.9.0) + + '@opentelemetry/exporter-logs-otlp-http@0.211.0(@opentelemetry/api@1.9.0)': + dependencies: + '@opentelemetry/api': 1.9.0 + '@opentelemetry/api-logs': 0.211.0 + '@opentelemetry/core': 2.5.0(@opentelemetry/api@1.9.0) + '@opentelemetry/otlp-exporter-base': 0.211.0(@opentelemetry/api@1.9.0) + '@opentelemetry/otlp-transformer': 0.211.0(@opentelemetry/api@1.9.0) + '@opentelemetry/sdk-logs': 0.211.0(@opentelemetry/api@1.9.0) + + '@opentelemetry/exporter-logs-otlp-proto@0.203.0(@opentelemetry/api@1.9.0)': + dependencies: + '@opentelemetry/api': 1.9.0 + '@opentelemetry/api-logs': 0.203.0 + '@opentelemetry/core': 2.0.1(@opentelemetry/api@1.9.0) + '@opentelemetry/otlp-exporter-base': 0.203.0(@opentelemetry/api@1.9.0) + '@opentelemetry/otlp-transformer': 0.203.0(@opentelemetry/api@1.9.0) + '@opentelemetry/resources': 2.0.1(@opentelemetry/api@1.9.0) + '@opentelemetry/sdk-logs': 0.203.0(@opentelemetry/api@1.9.0) + '@opentelemetry/sdk-trace-base': 2.0.1(@opentelemetry/api@1.9.0) + + '@opentelemetry/exporter-logs-otlp-proto@0.211.0(@opentelemetry/api@1.9.0)': + dependencies: + '@opentelemetry/api': 1.9.0 + '@opentelemetry/api-logs': 0.211.0 + '@opentelemetry/core': 2.5.0(@opentelemetry/api@1.9.0) + '@opentelemetry/otlp-exporter-base': 0.211.0(@opentelemetry/api@1.9.0) + '@opentelemetry/otlp-transformer': 0.211.0(@opentelemetry/api@1.9.0) + '@opentelemetry/resources': 2.5.0(@opentelemetry/api@1.9.0) + '@opentelemetry/sdk-logs': 0.211.0(@opentelemetry/api@1.9.0) + '@opentelemetry/sdk-trace-base': 2.5.0(@opentelemetry/api@1.9.0) + + '@opentelemetry/exporter-metrics-otlp-grpc@0.203.0(@opentelemetry/api@1.9.0)': + dependencies: + '@grpc/grpc-js': 1.14.3 + 
'@opentelemetry/api': 1.9.0 + '@opentelemetry/core': 2.0.1(@opentelemetry/api@1.9.0) + '@opentelemetry/exporter-metrics-otlp-http': 0.203.0(@opentelemetry/api@1.9.0) + '@opentelemetry/otlp-exporter-base': 0.203.0(@opentelemetry/api@1.9.0) + '@opentelemetry/otlp-grpc-exporter-base': 0.203.0(@opentelemetry/api@1.9.0) + '@opentelemetry/otlp-transformer': 0.203.0(@opentelemetry/api@1.9.0) + '@opentelemetry/resources': 2.0.1(@opentelemetry/api@1.9.0) + '@opentelemetry/sdk-metrics': 2.0.1(@opentelemetry/api@1.9.0) + + '@opentelemetry/exporter-metrics-otlp-grpc@0.211.0(@opentelemetry/api@1.9.0)': + dependencies: + '@grpc/grpc-js': 1.14.3 + '@opentelemetry/api': 1.9.0 + '@opentelemetry/core': 2.5.0(@opentelemetry/api@1.9.0) + '@opentelemetry/exporter-metrics-otlp-http': 0.211.0(@opentelemetry/api@1.9.0) + '@opentelemetry/otlp-exporter-base': 0.211.0(@opentelemetry/api@1.9.0) + '@opentelemetry/otlp-grpc-exporter-base': 0.211.0(@opentelemetry/api@1.9.0) + '@opentelemetry/otlp-transformer': 0.211.0(@opentelemetry/api@1.9.0) + '@opentelemetry/resources': 2.5.0(@opentelemetry/api@1.9.0) + '@opentelemetry/sdk-metrics': 2.5.0(@opentelemetry/api@1.9.0) + + '@opentelemetry/exporter-metrics-otlp-http@0.203.0(@opentelemetry/api@1.9.0)': + dependencies: + '@opentelemetry/api': 1.9.0 + '@opentelemetry/core': 2.0.1(@opentelemetry/api@1.9.0) + '@opentelemetry/otlp-exporter-base': 0.203.0(@opentelemetry/api@1.9.0) + '@opentelemetry/otlp-transformer': 0.203.0(@opentelemetry/api@1.9.0) + '@opentelemetry/resources': 2.0.1(@opentelemetry/api@1.9.0) + '@opentelemetry/sdk-metrics': 2.0.1(@opentelemetry/api@1.9.0) + + '@opentelemetry/exporter-metrics-otlp-http@0.211.0(@opentelemetry/api@1.9.0)': + dependencies: + '@opentelemetry/api': 1.9.0 + '@opentelemetry/core': 2.5.0(@opentelemetry/api@1.9.0) + '@opentelemetry/otlp-exporter-base': 0.211.0(@opentelemetry/api@1.9.0) + '@opentelemetry/otlp-transformer': 0.211.0(@opentelemetry/api@1.9.0) + '@opentelemetry/resources': 
2.5.0(@opentelemetry/api@1.9.0) + '@opentelemetry/sdk-metrics': 2.5.0(@opentelemetry/api@1.9.0) + + '@opentelemetry/exporter-metrics-otlp-proto@0.203.0(@opentelemetry/api@1.9.0)': + dependencies: + '@opentelemetry/api': 1.9.0 + '@opentelemetry/core': 2.0.1(@opentelemetry/api@1.9.0) + '@opentelemetry/exporter-metrics-otlp-http': 0.203.0(@opentelemetry/api@1.9.0) + '@opentelemetry/otlp-exporter-base': 0.203.0(@opentelemetry/api@1.9.0) + '@opentelemetry/otlp-transformer': 0.203.0(@opentelemetry/api@1.9.0) + '@opentelemetry/resources': 2.0.1(@opentelemetry/api@1.9.0) + '@opentelemetry/sdk-metrics': 2.0.1(@opentelemetry/api@1.9.0) + + '@opentelemetry/exporter-metrics-otlp-proto@0.211.0(@opentelemetry/api@1.9.0)': + dependencies: + '@opentelemetry/api': 1.9.0 + '@opentelemetry/core': 2.5.0(@opentelemetry/api@1.9.0) + '@opentelemetry/exporter-metrics-otlp-http': 0.211.0(@opentelemetry/api@1.9.0) + '@opentelemetry/otlp-exporter-base': 0.211.0(@opentelemetry/api@1.9.0) + '@opentelemetry/otlp-transformer': 0.211.0(@opentelemetry/api@1.9.0) + '@opentelemetry/resources': 2.5.0(@opentelemetry/api@1.9.0) + '@opentelemetry/sdk-metrics': 2.5.0(@opentelemetry/api@1.9.0) + + '@opentelemetry/exporter-prometheus@0.203.0(@opentelemetry/api@1.9.0)': + dependencies: + '@opentelemetry/api': 1.9.0 + '@opentelemetry/core': 2.0.1(@opentelemetry/api@1.9.0) + '@opentelemetry/resources': 2.0.1(@opentelemetry/api@1.9.0) + '@opentelemetry/sdk-metrics': 2.0.1(@opentelemetry/api@1.9.0) + + '@opentelemetry/exporter-prometheus@0.211.0(@opentelemetry/api@1.9.0)': + dependencies: + '@opentelemetry/api': 1.9.0 + '@opentelemetry/core': 2.5.0(@opentelemetry/api@1.9.0) + '@opentelemetry/resources': 2.5.0(@opentelemetry/api@1.9.0) + '@opentelemetry/sdk-metrics': 2.5.0(@opentelemetry/api@1.9.0) + + '@opentelemetry/exporter-trace-otlp-grpc@0.203.0(@opentelemetry/api@1.9.0)': + dependencies: + '@grpc/grpc-js': 1.14.3 + '@opentelemetry/api': 1.9.0 + '@opentelemetry/core': 2.0.1(@opentelemetry/api@1.9.0) + 
'@opentelemetry/otlp-exporter-base': 0.203.0(@opentelemetry/api@1.9.0) + '@opentelemetry/otlp-grpc-exporter-base': 0.203.0(@opentelemetry/api@1.9.0) + '@opentelemetry/otlp-transformer': 0.203.0(@opentelemetry/api@1.9.0) + '@opentelemetry/resources': 2.0.1(@opentelemetry/api@1.9.0) + '@opentelemetry/sdk-trace-base': 2.0.1(@opentelemetry/api@1.9.0) + + '@opentelemetry/exporter-trace-otlp-grpc@0.211.0(@opentelemetry/api@1.9.0)': + dependencies: + '@grpc/grpc-js': 1.14.3 + '@opentelemetry/api': 1.9.0 + '@opentelemetry/core': 2.5.0(@opentelemetry/api@1.9.0) + '@opentelemetry/otlp-exporter-base': 0.211.0(@opentelemetry/api@1.9.0) + '@opentelemetry/otlp-grpc-exporter-base': 0.211.0(@opentelemetry/api@1.9.0) + '@opentelemetry/otlp-transformer': 0.211.0(@opentelemetry/api@1.9.0) + '@opentelemetry/resources': 2.5.0(@opentelemetry/api@1.9.0) + '@opentelemetry/sdk-trace-base': 2.5.0(@opentelemetry/api@1.9.0) + + '@opentelemetry/exporter-trace-otlp-http@0.203.0(@opentelemetry/api@1.9.0)': + dependencies: + '@opentelemetry/api': 1.9.0 + '@opentelemetry/core': 2.0.1(@opentelemetry/api@1.9.0) + '@opentelemetry/otlp-exporter-base': 0.203.0(@opentelemetry/api@1.9.0) + '@opentelemetry/otlp-transformer': 0.203.0(@opentelemetry/api@1.9.0) + '@opentelemetry/resources': 2.0.1(@opentelemetry/api@1.9.0) + '@opentelemetry/sdk-trace-base': 2.0.1(@opentelemetry/api@1.9.0) + + '@opentelemetry/exporter-trace-otlp-http@0.211.0(@opentelemetry/api@1.9.0)': + dependencies: + '@opentelemetry/api': 1.9.0 + '@opentelemetry/core': 2.5.0(@opentelemetry/api@1.9.0) + '@opentelemetry/otlp-exporter-base': 0.211.0(@opentelemetry/api@1.9.0) + '@opentelemetry/otlp-transformer': 0.211.0(@opentelemetry/api@1.9.0) + '@opentelemetry/resources': 2.5.0(@opentelemetry/api@1.9.0) + '@opentelemetry/sdk-trace-base': 2.5.0(@opentelemetry/api@1.9.0) + + '@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0)': + dependencies: + '@opentelemetry/api': 1.9.0 + '@opentelemetry/core': 
2.0.1(@opentelemetry/api@1.9.0) + '@opentelemetry/otlp-exporter-base': 0.203.0(@opentelemetry/api@1.9.0) + '@opentelemetry/otlp-transformer': 0.203.0(@opentelemetry/api@1.9.0) + '@opentelemetry/resources': 2.0.1(@opentelemetry/api@1.9.0) + '@opentelemetry/sdk-trace-base': 2.0.1(@opentelemetry/api@1.9.0) + + '@opentelemetry/exporter-trace-otlp-proto@0.211.0(@opentelemetry/api@1.9.0)': + dependencies: + '@opentelemetry/api': 1.9.0 + '@opentelemetry/core': 2.5.0(@opentelemetry/api@1.9.0) + '@opentelemetry/otlp-exporter-base': 0.211.0(@opentelemetry/api@1.9.0) + '@opentelemetry/otlp-transformer': 0.211.0(@opentelemetry/api@1.9.0) + '@opentelemetry/resources': 2.5.0(@opentelemetry/api@1.9.0) + '@opentelemetry/sdk-trace-base': 2.5.0(@opentelemetry/api@1.9.0) + + '@opentelemetry/exporter-zipkin@2.0.1(@opentelemetry/api@1.9.0)': + dependencies: + '@opentelemetry/api': 1.9.0 + '@opentelemetry/core': 2.0.1(@opentelemetry/api@1.9.0) + '@opentelemetry/resources': 2.0.1(@opentelemetry/api@1.9.0) + '@opentelemetry/sdk-trace-base': 2.0.1(@opentelemetry/api@1.9.0) + '@opentelemetry/semantic-conventions': 1.40.0 + + '@opentelemetry/exporter-zipkin@2.5.0(@opentelemetry/api@1.9.0)': + dependencies: + '@opentelemetry/api': 1.9.0 + '@opentelemetry/core': 2.5.0(@opentelemetry/api@1.9.0) + '@opentelemetry/resources': 2.5.0(@opentelemetry/api@1.9.0) + '@opentelemetry/sdk-trace-base': 2.5.0(@opentelemetry/api@1.9.0) + '@opentelemetry/semantic-conventions': 1.40.0 + + '@opentelemetry/instrumentation-http@0.203.0(@opentelemetry/api@1.9.0)': + dependencies: + '@opentelemetry/api': 1.9.0 + '@opentelemetry/core': 2.0.1(@opentelemetry/api@1.9.0) + '@opentelemetry/instrumentation': 0.203.0(@opentelemetry/api@1.9.0) + '@opentelemetry/semantic-conventions': 1.40.0 + forwarded-parse: 2.1.2 + transitivePeerDependencies: + - supports-color + + '@opentelemetry/instrumentation-http@0.211.0(@opentelemetry/api@1.9.0)': + dependencies: + '@opentelemetry/api': 1.9.0 + '@opentelemetry/core': 
2.5.0(@opentelemetry/api@1.9.0) + '@opentelemetry/instrumentation': 0.211.0(@opentelemetry/api@1.9.0) + '@opentelemetry/semantic-conventions': 1.40.0 + forwarded-parse: 2.1.2 + transitivePeerDependencies: + - supports-color + + '@opentelemetry/instrumentation@0.203.0(@opentelemetry/api@1.9.0)': + dependencies: + '@opentelemetry/api': 1.9.0 + '@opentelemetry/api-logs': 0.203.0 + import-in-the-middle: 1.15.0 + require-in-the-middle: 7.5.2 + transitivePeerDependencies: + - supports-color + + '@opentelemetry/instrumentation@0.211.0(@opentelemetry/api@1.9.0)': + dependencies: + '@opentelemetry/api': 1.9.0 + '@opentelemetry/api-logs': 0.211.0 + import-in-the-middle: 2.0.6 + require-in-the-middle: 8.0.1 + transitivePeerDependencies: + - supports-color + + '@opentelemetry/otlp-exporter-base@0.203.0(@opentelemetry/api@1.9.0)': + dependencies: + '@opentelemetry/api': 1.9.0 + '@opentelemetry/core': 2.0.1(@opentelemetry/api@1.9.0) + '@opentelemetry/otlp-transformer': 0.203.0(@opentelemetry/api@1.9.0) + + '@opentelemetry/otlp-exporter-base@0.211.0(@opentelemetry/api@1.9.0)': + dependencies: + '@opentelemetry/api': 1.9.0 + '@opentelemetry/core': 2.5.0(@opentelemetry/api@1.9.0) + '@opentelemetry/otlp-transformer': 0.211.0(@opentelemetry/api@1.9.0) + + '@opentelemetry/otlp-grpc-exporter-base@0.203.0(@opentelemetry/api@1.9.0)': + dependencies: + '@grpc/grpc-js': 1.14.3 + '@opentelemetry/api': 1.9.0 + '@opentelemetry/core': 2.0.1(@opentelemetry/api@1.9.0) + '@opentelemetry/otlp-exporter-base': 0.203.0(@opentelemetry/api@1.9.0) + '@opentelemetry/otlp-transformer': 0.203.0(@opentelemetry/api@1.9.0) + + '@opentelemetry/otlp-grpc-exporter-base@0.211.0(@opentelemetry/api@1.9.0)': + dependencies: + '@grpc/grpc-js': 1.14.3 + '@opentelemetry/api': 1.9.0 + '@opentelemetry/core': 2.5.0(@opentelemetry/api@1.9.0) + '@opentelemetry/otlp-exporter-base': 0.211.0(@opentelemetry/api@1.9.0) + '@opentelemetry/otlp-transformer': 0.211.0(@opentelemetry/api@1.9.0) + + 
'@opentelemetry/otlp-transformer@0.203.0(@opentelemetry/api@1.9.0)': + dependencies: + '@opentelemetry/api': 1.9.0 + '@opentelemetry/api-logs': 0.203.0 + '@opentelemetry/core': 2.0.1(@opentelemetry/api@1.9.0) + '@opentelemetry/resources': 2.0.1(@opentelemetry/api@1.9.0) + '@opentelemetry/sdk-logs': 0.203.0(@opentelemetry/api@1.9.0) + '@opentelemetry/sdk-metrics': 2.0.1(@opentelemetry/api@1.9.0) + '@opentelemetry/sdk-trace-base': 2.0.1(@opentelemetry/api@1.9.0) + protobufjs: 7.5.4 + + '@opentelemetry/otlp-transformer@0.211.0(@opentelemetry/api@1.9.0)': + dependencies: + '@opentelemetry/api': 1.9.0 + '@opentelemetry/api-logs': 0.211.0 + '@opentelemetry/core': 2.5.0(@opentelemetry/api@1.9.0) + '@opentelemetry/resources': 2.5.0(@opentelemetry/api@1.9.0) + '@opentelemetry/sdk-logs': 0.211.0(@opentelemetry/api@1.9.0) + '@opentelemetry/sdk-metrics': 2.5.0(@opentelemetry/api@1.9.0) + '@opentelemetry/sdk-trace-base': 2.5.0(@opentelemetry/api@1.9.0) + protobufjs: 8.0.0 + + '@opentelemetry/propagator-b3@2.0.1(@opentelemetry/api@1.9.0)': + dependencies: + '@opentelemetry/api': 1.9.0 + '@opentelemetry/core': 2.0.1(@opentelemetry/api@1.9.0) + + '@opentelemetry/propagator-b3@2.5.0(@opentelemetry/api@1.9.0)': + dependencies: + '@opentelemetry/api': 1.9.0 + '@opentelemetry/core': 2.5.0(@opentelemetry/api@1.9.0) + + '@opentelemetry/propagator-jaeger@2.0.1(@opentelemetry/api@1.9.0)': + dependencies: + '@opentelemetry/api': 1.9.0 + '@opentelemetry/core': 2.0.1(@opentelemetry/api@1.9.0) + + '@opentelemetry/propagator-jaeger@2.5.0(@opentelemetry/api@1.9.0)': + dependencies: + '@opentelemetry/api': 1.9.0 + '@opentelemetry/core': 2.5.0(@opentelemetry/api@1.9.0) + + '@opentelemetry/resource-detector-gcp@0.40.3(@opentelemetry/api@1.9.0)': + dependencies: + '@opentelemetry/api': 1.9.0 + '@opentelemetry/core': 2.6.0(@opentelemetry/api@1.9.0) + '@opentelemetry/resources': 2.6.0(@opentelemetry/api@1.9.0) + gcp-metadata: 6.1.1 + transitivePeerDependencies: + - encoding + - supports-color + + 
'@opentelemetry/resources@2.0.1(@opentelemetry/api@1.9.0)': + dependencies: + '@opentelemetry/api': 1.9.0 + '@opentelemetry/core': 2.0.1(@opentelemetry/api@1.9.0) + '@opentelemetry/semantic-conventions': 1.40.0 + + '@opentelemetry/resources@2.5.0(@opentelemetry/api@1.9.0)': + dependencies: + '@opentelemetry/api': 1.9.0 + '@opentelemetry/core': 2.5.0(@opentelemetry/api@1.9.0) + '@opentelemetry/semantic-conventions': 1.40.0 + + '@opentelemetry/resources@2.6.0(@opentelemetry/api@1.9.0)': + dependencies: + '@opentelemetry/api': 1.9.0 + '@opentelemetry/core': 2.6.0(@opentelemetry/api@1.9.0) + '@opentelemetry/semantic-conventions': 1.40.0 + + '@opentelemetry/sdk-logs@0.203.0(@opentelemetry/api@1.9.0)': + dependencies: + '@opentelemetry/api': 1.9.0 + '@opentelemetry/api-logs': 0.203.0 + '@opentelemetry/core': 2.0.1(@opentelemetry/api@1.9.0) + '@opentelemetry/resources': 2.0.1(@opentelemetry/api@1.9.0) + + '@opentelemetry/sdk-logs@0.211.0(@opentelemetry/api@1.9.0)': + dependencies: + '@opentelemetry/api': 1.9.0 + '@opentelemetry/api-logs': 0.211.0 + '@opentelemetry/core': 2.5.0(@opentelemetry/api@1.9.0) + '@opentelemetry/resources': 2.5.0(@opentelemetry/api@1.9.0) + + '@opentelemetry/sdk-metrics@2.0.1(@opentelemetry/api@1.9.0)': + dependencies: + '@opentelemetry/api': 1.9.0 + '@opentelemetry/core': 2.0.1(@opentelemetry/api@1.9.0) + '@opentelemetry/resources': 2.0.1(@opentelemetry/api@1.9.0) + + '@opentelemetry/sdk-metrics@2.5.0(@opentelemetry/api@1.9.0)': + dependencies: + '@opentelemetry/api': 1.9.0 + '@opentelemetry/core': 2.5.0(@opentelemetry/api@1.9.0) + '@opentelemetry/resources': 2.5.0(@opentelemetry/api@1.9.0) + + '@opentelemetry/sdk-metrics@2.6.0(@opentelemetry/api@1.9.0)': + dependencies: + '@opentelemetry/api': 1.9.0 + '@opentelemetry/core': 2.6.0(@opentelemetry/api@1.9.0) + '@opentelemetry/resources': 2.6.0(@opentelemetry/api@1.9.0) + + '@opentelemetry/sdk-node@0.203.0(@opentelemetry/api@1.9.0)': + dependencies: + '@opentelemetry/api': 1.9.0 + 
'@opentelemetry/api-logs': 0.203.0 + '@opentelemetry/core': 2.0.1(@opentelemetry/api@1.9.0) + '@opentelemetry/exporter-logs-otlp-grpc': 0.203.0(@opentelemetry/api@1.9.0) + '@opentelemetry/exporter-logs-otlp-http': 0.203.0(@opentelemetry/api@1.9.0) + '@opentelemetry/exporter-logs-otlp-proto': 0.203.0(@opentelemetry/api@1.9.0) + '@opentelemetry/exporter-metrics-otlp-grpc': 0.203.0(@opentelemetry/api@1.9.0) + '@opentelemetry/exporter-metrics-otlp-http': 0.203.0(@opentelemetry/api@1.9.0) + '@opentelemetry/exporter-metrics-otlp-proto': 0.203.0(@opentelemetry/api@1.9.0) + '@opentelemetry/exporter-prometheus': 0.203.0(@opentelemetry/api@1.9.0) + '@opentelemetry/exporter-trace-otlp-grpc': 0.203.0(@opentelemetry/api@1.9.0) + '@opentelemetry/exporter-trace-otlp-http': 0.203.0(@opentelemetry/api@1.9.0) + '@opentelemetry/exporter-trace-otlp-proto': 0.203.0(@opentelemetry/api@1.9.0) + '@opentelemetry/exporter-zipkin': 2.0.1(@opentelemetry/api@1.9.0) + '@opentelemetry/instrumentation': 0.203.0(@opentelemetry/api@1.9.0) + '@opentelemetry/propagator-b3': 2.0.1(@opentelemetry/api@1.9.0) + '@opentelemetry/propagator-jaeger': 2.0.1(@opentelemetry/api@1.9.0) + '@opentelemetry/resources': 2.0.1(@opentelemetry/api@1.9.0) + '@opentelemetry/sdk-logs': 0.203.0(@opentelemetry/api@1.9.0) + '@opentelemetry/sdk-metrics': 2.0.1(@opentelemetry/api@1.9.0) + '@opentelemetry/sdk-trace-base': 2.0.1(@opentelemetry/api@1.9.0) + '@opentelemetry/sdk-trace-node': 2.0.1(@opentelemetry/api@1.9.0) + '@opentelemetry/semantic-conventions': 1.40.0 + transitivePeerDependencies: + - supports-color + + '@opentelemetry/sdk-node@0.211.0(@opentelemetry/api@1.9.0)': + dependencies: + '@opentelemetry/api': 1.9.0 + '@opentelemetry/api-logs': 0.211.0 + '@opentelemetry/configuration': 0.211.0(@opentelemetry/api@1.9.0) + '@opentelemetry/context-async-hooks': 2.5.0(@opentelemetry/api@1.9.0) + '@opentelemetry/core': 2.5.0(@opentelemetry/api@1.9.0) + '@opentelemetry/exporter-logs-otlp-grpc': 0.211.0(@opentelemetry/api@1.9.0) 
+ '@opentelemetry/exporter-logs-otlp-http': 0.211.0(@opentelemetry/api@1.9.0) + '@opentelemetry/exporter-logs-otlp-proto': 0.211.0(@opentelemetry/api@1.9.0) + '@opentelemetry/exporter-metrics-otlp-grpc': 0.211.0(@opentelemetry/api@1.9.0) + '@opentelemetry/exporter-metrics-otlp-http': 0.211.0(@opentelemetry/api@1.9.0) + '@opentelemetry/exporter-metrics-otlp-proto': 0.211.0(@opentelemetry/api@1.9.0) + '@opentelemetry/exporter-prometheus': 0.211.0(@opentelemetry/api@1.9.0) + '@opentelemetry/exporter-trace-otlp-grpc': 0.211.0(@opentelemetry/api@1.9.0) + '@opentelemetry/exporter-trace-otlp-http': 0.211.0(@opentelemetry/api@1.9.0) + '@opentelemetry/exporter-trace-otlp-proto': 0.211.0(@opentelemetry/api@1.9.0) + '@opentelemetry/exporter-zipkin': 2.5.0(@opentelemetry/api@1.9.0) + '@opentelemetry/instrumentation': 0.211.0(@opentelemetry/api@1.9.0) + '@opentelemetry/propagator-b3': 2.5.0(@opentelemetry/api@1.9.0) + '@opentelemetry/propagator-jaeger': 2.5.0(@opentelemetry/api@1.9.0) + '@opentelemetry/resources': 2.5.0(@opentelemetry/api@1.9.0) + '@opentelemetry/sdk-logs': 0.211.0(@opentelemetry/api@1.9.0) + '@opentelemetry/sdk-metrics': 2.5.0(@opentelemetry/api@1.9.0) + '@opentelemetry/sdk-trace-base': 2.5.0(@opentelemetry/api@1.9.0) + '@opentelemetry/sdk-trace-node': 2.5.0(@opentelemetry/api@1.9.0) + '@opentelemetry/semantic-conventions': 1.40.0 + transitivePeerDependencies: + - supports-color + + '@opentelemetry/sdk-trace-base@2.0.1(@opentelemetry/api@1.9.0)': + dependencies: + '@opentelemetry/api': 1.9.0 + '@opentelemetry/core': 2.0.1(@opentelemetry/api@1.9.0) + '@opentelemetry/resources': 2.0.1(@opentelemetry/api@1.9.0) + '@opentelemetry/semantic-conventions': 1.40.0 + + '@opentelemetry/sdk-trace-base@2.5.0(@opentelemetry/api@1.9.0)': + dependencies: + '@opentelemetry/api': 1.9.0 + '@opentelemetry/core': 2.5.0(@opentelemetry/api@1.9.0) + '@opentelemetry/resources': 2.5.0(@opentelemetry/api@1.9.0) + '@opentelemetry/semantic-conventions': 1.40.0 + + 
'@opentelemetry/sdk-trace-base@2.6.0(@opentelemetry/api@1.9.0)': + dependencies: + '@opentelemetry/api': 1.9.0 + '@opentelemetry/core': 2.6.0(@opentelemetry/api@1.9.0) + '@opentelemetry/resources': 2.6.0(@opentelemetry/api@1.9.0) + '@opentelemetry/semantic-conventions': 1.40.0 + + '@opentelemetry/sdk-trace-node@2.0.1(@opentelemetry/api@1.9.0)': + dependencies: + '@opentelemetry/api': 1.9.0 + '@opentelemetry/context-async-hooks': 2.0.1(@opentelemetry/api@1.9.0) + '@opentelemetry/core': 2.0.1(@opentelemetry/api@1.9.0) + '@opentelemetry/sdk-trace-base': 2.0.1(@opentelemetry/api@1.9.0) + + '@opentelemetry/sdk-trace-node@2.5.0(@opentelemetry/api@1.9.0)': + dependencies: + '@opentelemetry/api': 1.9.0 + '@opentelemetry/context-async-hooks': 2.5.0(@opentelemetry/api@1.9.0) + '@opentelemetry/core': 2.5.0(@opentelemetry/api@1.9.0) + '@opentelemetry/sdk-trace-base': 2.5.0(@opentelemetry/api@1.9.0) + + '@opentelemetry/sdk-trace-node@2.6.0(@opentelemetry/api@1.9.0)': + dependencies: + '@opentelemetry/api': 1.9.0 + '@opentelemetry/context-async-hooks': 2.6.0(@opentelemetry/api@1.9.0) + '@opentelemetry/core': 2.6.0(@opentelemetry/api@1.9.0) + '@opentelemetry/sdk-trace-base': 2.6.0(@opentelemetry/api@1.9.0) + + '@opentelemetry/semantic-conventions@1.40.0': {} + + '@protobufjs/aspromise@1.1.2': {} + + '@protobufjs/base64@1.1.2': {} + + '@protobufjs/codegen@2.0.4': {} + + '@protobufjs/eventemitter@1.1.0': {} + + '@protobufjs/fetch@1.1.0': + dependencies: + '@protobufjs/aspromise': 1.1.2 + '@protobufjs/inquire': 1.1.0 + + '@protobufjs/float@1.0.2': {} + + '@protobufjs/inquire@1.1.0': {} + + '@protobufjs/path@1.1.2': {} + + '@protobufjs/pool@1.1.0': {} + + '@protobufjs/utf8@1.1.0': {} + + '@rollup/rollup-android-arm-eabi@4.59.0': + optional: true + + '@rollup/rollup-android-arm64@4.59.0': + optional: true + + '@rollup/rollup-darwin-arm64@4.59.0': + optional: true + + '@rollup/rollup-darwin-x64@4.59.0': + optional: true + + '@rollup/rollup-freebsd-arm64@4.59.0': + optional: true + + 
'@rollup/rollup-freebsd-x64@4.59.0': + optional: true + + '@rollup/rollup-linux-arm-gnueabihf@4.59.0': + optional: true + + '@rollup/rollup-linux-arm-musleabihf@4.59.0': + optional: true + + '@rollup/rollup-linux-arm64-gnu@4.59.0': + optional: true + + '@rollup/rollup-linux-arm64-musl@4.59.0': + optional: true + + '@rollup/rollup-linux-loong64-gnu@4.59.0': + optional: true + + '@rollup/rollup-linux-loong64-musl@4.59.0': + optional: true + + '@rollup/rollup-linux-ppc64-gnu@4.59.0': + optional: true + + '@rollup/rollup-linux-ppc64-musl@4.59.0': + optional: true + + '@rollup/rollup-linux-riscv64-gnu@4.59.0': + optional: true + + '@rollup/rollup-linux-riscv64-musl@4.59.0': + optional: true + + '@rollup/rollup-linux-s390x-gnu@4.59.0': + optional: true + + '@rollup/rollup-linux-x64-gnu@4.59.0': + optional: true + + '@rollup/rollup-linux-x64-musl@4.59.0': + optional: true + + '@rollup/rollup-openbsd-x64@4.59.0': + optional: true + + '@rollup/rollup-openharmony-arm64@4.59.0': + optional: true + + '@rollup/rollup-win32-arm64-msvc@4.59.0': + optional: true + + '@rollup/rollup-win32-ia32-msvc@4.59.0': + optional: true + + '@rollup/rollup-win32-x64-gnu@4.59.0': + optional: true + + '@rollup/rollup-win32-x64-msvc@4.59.0': + optional: true + + '@sec-ant/readable-stream@0.4.1': {} + + '@selderee/plugin-htmlparser2@0.11.0': + dependencies: + domhandler: 5.0.3 + selderee: 0.11.0 + + '@sindresorhus/is@7.2.0': {} + + '@sindresorhus/merge-streams@4.0.0': {} + + '@slack/bolt@4.6.0(@types/express@5.0.6)': + dependencies: + '@slack/logger': 4.0.1 + '@slack/oauth': 3.0.5 + '@slack/socket-mode': 2.0.6 + '@slack/types': 2.20.1 + '@slack/web-api': 7.15.0 + '@types/express': 5.0.6 + axios: 1.13.6 + express: 5.2.1 + path-to-regexp: 8.3.0 + raw-body: 3.0.2 + tsscmp: 1.0.6 + transitivePeerDependencies: + - bufferutil + - debug + - supports-color + - utf-8-validate + + '@slack/logger@4.0.1': + dependencies: + '@types/node': 22.19.15 + + '@slack/oauth@3.0.5': + dependencies: + '@slack/logger': 
4.0.1 + '@slack/web-api': 7.15.0 + '@types/jsonwebtoken': 9.0.10 + '@types/node': 22.19.15 + jsonwebtoken: 9.0.3 + transitivePeerDependencies: + - debug + + '@slack/socket-mode@2.0.6': + dependencies: + '@slack/logger': 4.0.1 + '@slack/web-api': 7.15.0 + '@types/node': 22.19.15 + '@types/ws': 8.18.1 + eventemitter3: 5.0.4 + ws: 8.19.0 + transitivePeerDependencies: + - bufferutil + - debug + - utf-8-validate + + '@slack/types@2.20.1': {} + + '@slack/web-api@7.15.0': + dependencies: + '@slack/logger': 4.0.1 + '@slack/types': 2.20.1 + '@types/node': 22.19.15 + '@types/retry': 0.12.0 + axios: 1.13.6 + eventemitter3: 5.0.4 + form-data: 4.0.5 + is-electron: 2.2.2 + is-stream: 2.0.1 + p-queue: 6.6.2 + p-retry: 4.6.2 + retry: 0.13.1 + transitivePeerDependencies: + - debug + + '@standard-schema/spec@1.1.0': {} + + '@tootallnate/once@2.0.0': {} + + '@types/body-parser@1.19.6': + dependencies: + '@types/connect': 3.4.38 + '@types/node': 22.19.15 + + '@types/caseless@0.12.5': {} + + '@types/chai@5.2.3': + dependencies: + '@types/deep-eql': 4.0.2 + assertion-error: 2.0.1 + + '@types/connect@3.4.38': + dependencies: + '@types/node': 22.19.15 + + '@types/debug@4.1.13': + dependencies: + '@types/ms': 2.1.0 + optional: true + + '@types/deep-eql@4.0.2': {} + + '@types/estree@1.0.8': {} + + '@types/express-serve-static-core@5.1.1': + dependencies: + '@types/node': 22.19.15 + '@types/qs': 6.15.0 + '@types/range-parser': 1.2.7 + '@types/send': 1.2.1 + + '@types/express@5.0.6': + dependencies: + '@types/body-parser': 1.19.6 + '@types/express-serve-static-core': 5.1.1 + '@types/serve-static': 2.2.0 + + '@types/glob@8.1.0': + dependencies: + '@types/minimatch': 5.1.2 + '@types/node': 22.19.15 + + '@types/html-to-text@9.0.4': {} + + '@types/http-cache-semantics@4.2.0': {} + + '@types/http-errors@2.0.5': {} + + '@types/jsonwebtoken@9.0.10': + dependencies: + '@types/ms': 2.1.0 + '@types/node': 22.19.15 + + '@types/long@4.0.2': {} + + '@types/minimatch@5.1.2': {} + + '@types/ms@2.1.0': {} + 
+ '@types/node@22.19.15': + dependencies: + undici-types: 6.21.0 + + '@types/normalize-package-data@2.4.4': {} + + '@types/qs@6.15.0': {} + + '@types/range-parser@1.2.7': {} + + '@types/request@2.48.13': + dependencies: + '@types/caseless': 0.12.5 + '@types/node': 22.19.15 + '@types/tough-cookie': 4.0.5 + form-data: 2.5.5 + + '@types/retry@0.12.0': {} + + '@types/send@1.2.1': + dependencies: + '@types/node': 22.19.15 + + '@types/serve-static@2.2.0': + dependencies: + '@types/http-errors': 2.0.5 + '@types/node': 22.19.15 + + '@types/tough-cookie@4.0.5': {} + + '@types/ws@8.18.1': + dependencies: + '@types/node': 22.19.15 + + '@types/yauzl@2.10.3': + dependencies: + '@types/node': 22.19.15 + optional: true + + '@vercel/oidc@3.1.0': {} + + '@vitest/expect@3.2.4': + dependencies: + '@types/chai': 5.2.3 + '@vitest/spy': 3.2.4 + '@vitest/utils': 3.2.4 + chai: 5.3.3 + tinyrainbow: 2.0.0 + + '@vitest/mocker@3.2.4(vite@7.3.1(@types/node@22.19.15)(yaml@2.8.2))': + dependencies: + '@vitest/spy': 3.2.4 + estree-walker: 3.0.3 + magic-string: 0.30.21 + optionalDependencies: + vite: 7.3.1(@types/node@22.19.15)(yaml@2.8.2) + + '@vitest/pretty-format@3.2.4': + dependencies: + tinyrainbow: 2.0.0 + + '@vitest/runner@3.2.4': + dependencies: + '@vitest/utils': 3.2.4 + pathe: 2.0.3 + strip-literal: 3.1.0 + + '@vitest/snapshot@3.2.4': + dependencies: + '@vitest/pretty-format': 3.2.4 + magic-string: 0.30.21 + pathe: 2.0.3 + + '@vitest/spy@3.2.4': + dependencies: + tinyspy: 4.0.4 + + '@vitest/utils@3.2.4': + dependencies: + '@vitest/pretty-format': 3.2.4 + loupe: 3.2.1 + tinyrainbow: 2.0.0 + + '@xterm/headless@5.5.0': {} + + abort-controller@3.0.0: + dependencies: + event-target-shim: 5.0.1 + + accepts@2.0.0: + dependencies: + mime-types: 3.0.2 + negotiator: 1.0.0 + + acorn-import-attributes@1.9.5(acorn@8.16.0): + dependencies: + acorn: 8.16.0 + + acorn@8.16.0: {} + + agent-base@6.0.2: + dependencies: + debug: 4.4.3 + transitivePeerDependencies: + - supports-color + + agent-base@7.1.4: {} 
+ + ai-sdk-provider-claude-code@3.4.4(zod@4.3.6): + dependencies: + '@ai-sdk/provider': 3.0.8 + '@ai-sdk/provider-utils': 4.0.19(zod@4.3.6) + '@anthropic-ai/claude-agent-sdk': 0.2.76(zod@4.3.6) + zod: 4.3.6 + + ai-sdk-provider-gemini-cli@2.0.1(@modelcontextprotocol/sdk@1.27.1(zod@3.25.76))(@opentelemetry/core@2.6.0(@opentelemetry/api@1.9.0))(@opentelemetry/resources@2.6.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-metrics@2.6.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.6.0(@opentelemetry/api@1.9.0))(zod@4.3.6): + dependencies: + '@ai-sdk/provider': 3.0.8 + '@ai-sdk/provider-utils': 4.0.19(zod@4.3.6) + '@google/gemini-cli-core': 0.22.4(@opentelemetry/core@2.6.0(@opentelemetry/api@1.9.0))(@opentelemetry/resources@2.6.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-metrics@2.6.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.6.0(@opentelemetry/api@1.9.0)) + '@google/genai': 1.30.0(@modelcontextprotocol/sdk@1.27.1(zod@4.3.6)) + google-auth-library: 9.15.1 + zod: 4.3.6 + zod-to-json-schema: 3.25.0(zod@4.3.6) + transitivePeerDependencies: + - '@cfworker/json-schema' + - '@modelcontextprotocol/sdk' + - '@opentelemetry/core' + - '@opentelemetry/resources' + - '@opentelemetry/sdk-metrics' + - '@opentelemetry/sdk-trace-base' + - '@types/emscripten' + - bufferutil + - encoding + - supports-color + - tree-sitter + - utf-8-validate + + ai@6.0.116(zod@4.3.6): + dependencies: + '@ai-sdk/gateway': 3.0.66(zod@4.3.6) + '@ai-sdk/provider': 3.0.8 + '@ai-sdk/provider-utils': 4.0.19(zod@4.3.6) + '@opentelemetry/api': 1.9.0 + zod: 4.3.6 + + ajv-formats@3.0.1(ajv@8.18.0): + optionalDependencies: + ajv: 8.18.0 + + ajv@8.18.0: + dependencies: + fast-deep-equal: 3.1.3 + fast-uri: 3.1.0 + json-schema-traverse: 1.0.0 + require-from-string: 2.0.2 + + ansi-regex@5.0.1: {} + + ansi-regex@6.2.2: {} + + ansi-styles@4.3.0: + dependencies: + color-convert: 2.0.1 + + argparse@2.0.1: {} + + arrify@2.0.1: {} + + assertion-error@2.0.1: {} + + asynckit@0.4.0: {} + + 
axios@1.13.6: + dependencies: + follow-redirects: 1.15.11 + form-data: 4.0.5 + proxy-from-env: 1.1.0 + transitivePeerDependencies: + - debug + + balanced-match@4.0.4: {} + + base64-js@1.5.1: {} + + bignumber.js@9.3.1: {} + + bl@4.1.0: + dependencies: + buffer: 5.7.1 + inherits: 2.0.4 + readable-stream: 3.6.2 + optional: true + + body-parser@2.2.2: + dependencies: + bytes: 3.1.2 + content-type: 1.0.5 + debug: 4.4.3 + http-errors: 2.0.1 + iconv-lite: 0.7.2 + on-finished: 2.4.1 + qs: 6.15.0 + raw-body: 3.0.2 + type-is: 2.0.1 + transitivePeerDependencies: + - supports-color + + brace-expansion@5.0.4: + dependencies: + balanced-match: 4.0.4 + + buffer-crc32@0.2.13: {} + + buffer-equal-constant-time@1.0.1: {} + + buffer@5.7.1: + dependencies: + base64-js: 1.5.1 + ieee754: 1.2.1 + optional: true + + bundle-name@4.1.0: + dependencies: + run-applescript: 7.1.0 + + byte-counter@0.1.0: {} + + bytes@3.1.2: {} + + cac@6.7.14: {} + + cacheable-lookup@7.0.0: {} + + cacheable-request@13.0.18: + dependencies: + '@types/http-cache-semantics': 4.2.0 + get-stream: 9.0.1 + http-cache-semantics: 4.2.0 + keyv: 5.6.0 + mimic-response: 4.0.0 + normalize-url: 8.1.1 + responselike: 4.0.2 + + call-bind-apply-helpers@1.0.2: + dependencies: + es-errors: 1.3.0 + function-bind: 1.1.2 + + call-bound@1.0.4: + dependencies: + call-bind-apply-helpers: 1.0.2 + get-intrinsic: 1.3.0 + + chai@5.3.3: + dependencies: + assertion-error: 2.0.1 + check-error: 2.1.3 + deep-eql: 5.0.2 + loupe: 3.2.1 + pathval: 2.0.1 + + chardet@2.1.1: {} + + check-error@2.1.3: {} + + chownr@1.1.4: + optional: true + + cjs-module-lexer@1.4.3: {} + + cjs-module-lexer@2.2.0: {} + + cliui@8.0.1: + dependencies: + string-width: 4.2.3 + strip-ansi: 6.0.1 + wrap-ansi: 7.0.0 + + color-convert@2.0.1: + dependencies: + color-name: 1.1.4 + + color-name@1.1.4: {} + + combined-stream@1.0.8: + dependencies: + delayed-stream: 1.0.0 + + commander@10.0.1: {} + + content-disposition@1.0.1: {} + + content-type@1.0.5: {} + + 
cookie-signature@1.2.2: {} + + cookie@0.7.2: {} + + cors@2.8.6: + dependencies: + object-assign: 4.1.1 + vary: 1.1.2 + + cross-spawn@7.0.6: + dependencies: + path-key: 3.1.1 + shebang-command: 2.0.0 + which: 2.0.2 + + data-uri-to-buffer@4.0.1: {} + + debug@4.4.3: + dependencies: + ms: 2.1.3 + + decompress-response@10.0.0: + dependencies: + mimic-response: 4.0.0 + + decompress-response@6.0.0: + dependencies: + mimic-response: 3.1.0 + optional: true + + deep-eql@5.0.2: {} + + deep-extend@0.6.0: + optional: true + + deepmerge@4.3.1: {} + + default-browser-id@5.0.1: {} + + default-browser@5.5.0: + dependencies: + bundle-name: 4.1.0 + default-browser-id: 5.0.1 + + define-lazy-prop@3.0.0: {} + + delayed-stream@1.0.0: {} + + depd@2.0.0: {} + + detect-libc@2.1.2: + optional: true + + diff@7.0.0: {} + + diff@8.0.3: {} + + dom-serializer@2.0.0: + dependencies: + domelementtype: 2.3.0 + domhandler: 5.0.3 + entities: 4.5.0 + + domelementtype@2.3.0: {} + + domhandler@5.0.3: + dependencies: + domelementtype: 2.3.0 + + domutils@3.2.2: + dependencies: + dom-serializer: 2.0.0 + domelementtype: 2.3.0 + domhandler: 5.0.3 + + dot-prop@6.0.1: + dependencies: + is-obj: 2.0.0 + + dotenv-expand@12.0.3: + dependencies: + dotenv: 16.6.1 + + dotenv@16.6.1: {} + + dotenv@17.3.1: {} + + dunder-proto@1.0.1: + dependencies: + call-bind-apply-helpers: 1.0.2 + es-errors: 1.3.0 + gopd: 1.2.0 + + duplexify@4.1.3: + dependencies: + end-of-stream: 1.4.5 + inherits: 2.0.4 + readable-stream: 3.6.2 + stream-shift: 1.0.3 + + ecdsa-sig-formatter@1.0.11: + dependencies: + safe-buffer: 5.2.1 + + ee-first@1.1.1: {} + + emoji-regex@8.0.0: {} + + encodeurl@2.0.0: {} + + end-of-stream@1.4.5: + dependencies: + once: 1.4.0 + + entities@4.5.0: {} + + es-define-property@1.0.1: {} + + es-errors@1.3.0: {} + + es-module-lexer@1.7.0: {} + + es-object-atoms@1.1.1: + dependencies: + es-errors: 1.3.0 + + es-set-tostringtag@2.1.0: + dependencies: + es-errors: 1.3.0 + get-intrinsic: 1.3.0 + has-tostringtag: 1.0.2 + hasown: 
2.0.2 + + esbuild@0.27.3: + optionalDependencies: + '@esbuild/aix-ppc64': 0.27.3 + '@esbuild/android-arm': 0.27.3 + '@esbuild/android-arm64': 0.27.3 + '@esbuild/android-x64': 0.27.3 + '@esbuild/darwin-arm64': 0.27.3 + '@esbuild/darwin-x64': 0.27.3 + '@esbuild/freebsd-arm64': 0.27.3 + '@esbuild/freebsd-x64': 0.27.3 + '@esbuild/linux-arm': 0.27.3 + '@esbuild/linux-arm64': 0.27.3 + '@esbuild/linux-ia32': 0.27.3 + '@esbuild/linux-loong64': 0.27.3 + '@esbuild/linux-mips64el': 0.27.3 + '@esbuild/linux-ppc64': 0.27.3 + '@esbuild/linux-riscv64': 0.27.3 + '@esbuild/linux-s390x': 0.27.3 + '@esbuild/linux-x64': 0.27.3 + '@esbuild/netbsd-arm64': 0.27.3 + '@esbuild/netbsd-x64': 0.27.3 + '@esbuild/openbsd-arm64': 0.27.3 + '@esbuild/openbsd-x64': 0.27.3 + '@esbuild/openharmony-arm64': 0.27.3 + '@esbuild/sunos-x64': 0.27.3 + '@esbuild/win32-arm64': 0.27.3 + '@esbuild/win32-ia32': 0.27.3 + '@esbuild/win32-x64': 0.27.3 + + escalade@3.2.0: {} + + escape-html@1.0.3: {} + + estree-walker@3.0.3: + dependencies: + '@types/estree': 1.0.8 + + etag@1.8.1: {} + + event-target-shim@5.0.1: {} + + eventemitter3@4.0.7: {} + + eventemitter3@5.0.4: {} + + eventid@2.0.1: + dependencies: + uuid: 8.3.2 + + eventsource-parser@3.0.6: {} - expect-type@1.3.0: - resolution: {integrity: sha512-knvyeauYhqjOYvQ66MznSMs83wmHrCycNEN6Ao+2AeYEfxUIkuiVxdEa1qlGEPK+We3n0THiDciYSsCcgW/DoA==} - engines: {node: '>=12.0.0'} + eventsource@3.0.7: + dependencies: + eventsource-parser: 3.0.6 - fdir@6.5.0: - resolution: {integrity: sha512-tIbYtZbucOs0BRGqPJkshJUYdL+SDH7dVM8gjy+ERp3WAUjLEFJE+02kanyHtwjWOnwrKYBiwAmM0p4kLJAnXg==} - engines: {node: '>=12.0.0'} - peerDependencies: - picomatch: ^3 || ^4 - peerDependenciesMeta: - picomatch: - optional: true + execa@9.6.1: + dependencies: + '@sindresorhus/merge-streams': 4.0.0 + cross-spawn: 7.0.6 + figures: 6.1.0 + get-stream: 9.0.1 + human-signals: 8.0.1 + is-plain-obj: 4.1.0 + is-stream: 4.0.1 + npm-run-path: 6.0.0 + pretty-ms: 9.3.0 + signal-exit: 4.1.0 + strip-final-newline: 
4.0.0 + yoctocolors: 2.1.2 + + expand-template@2.0.3: + optional: true + + expect-type@1.3.0: {} + + express-rate-limit@8.3.1(express@5.2.1): + dependencies: + express: 5.2.1 + ip-address: 10.1.0 + + express@5.2.1: + dependencies: + accepts: 2.0.0 + body-parser: 2.2.2 + content-disposition: 1.0.1 + content-type: 1.0.5 + cookie: 0.7.2 + cookie-signature: 1.2.2 + debug: 4.4.3 + depd: 2.0.0 + encodeurl: 2.0.0 + escape-html: 1.0.3 + etag: 1.8.1 + finalhandler: 2.1.1 + fresh: 2.0.0 + http-errors: 2.0.1 + merge-descriptors: 2.0.0 + mime-types: 3.0.2 + on-finished: 2.4.1 + once: 1.4.0 + parseurl: 1.3.3 + proxy-addr: 2.0.7 + qs: 6.15.0 + range-parser: 1.2.1 + router: 2.2.0 + send: 1.2.1 + serve-static: 2.2.1 + statuses: 2.0.2 + type-is: 2.0.1 + vary: 1.1.2 + transitivePeerDependencies: + - supports-color + + extend@3.0.2: {} + + extract-zip@2.0.1: + dependencies: + debug: 4.4.3 + get-stream: 5.2.0 + yauzl: 2.10.0 + optionalDependencies: + '@types/yauzl': 2.10.3 + transitivePeerDependencies: + - supports-color + + fast-deep-equal@3.1.3: {} + + fast-levenshtein@2.0.6: {} + + fast-uri@3.1.0: {} + + fd-slicer@1.1.0: + dependencies: + pend: 1.2.0 + + fdir@6.5.0(picomatch@4.0.3): + optionalDependencies: + picomatch: 4.0.3 + + fetch-blob@3.2.0: + dependencies: + node-domexception: 1.0.0 + web-streams-polyfill: 3.3.3 + + figures@6.1.0: + dependencies: + is-unicode-supported: 2.1.0 + + finalhandler@2.1.1: + dependencies: + debug: 4.4.3 + encodeurl: 2.0.0 + escape-html: 1.0.3 + on-finished: 2.4.1 + parseurl: 1.3.3 + statuses: 2.0.2 + transitivePeerDependencies: + - supports-color + + find-up-simple@1.0.1: {} + + follow-redirects@1.15.11: {} + + foreground-child@3.3.1: + dependencies: + cross-spawn: 7.0.6 + signal-exit: 4.1.0 + + form-data-encoder@4.1.0: {} + + form-data@2.5.5: + dependencies: + asynckit: 0.4.0 + combined-stream: 1.0.8 + es-set-tostringtag: 2.1.0 + hasown: 2.0.2 + mime-types: 2.1.35 + safe-buffer: 5.2.1 + + form-data@4.0.5: + dependencies: + asynckit: 0.4.0 + 
combined-stream: 1.0.8 + es-set-tostringtag: 2.1.0 + hasown: 2.0.2 + mime-types: 2.1.35 + + formdata-polyfill@4.0.10: + dependencies: + fetch-blob: 3.2.0 + + forwarded-parse@2.1.2: {} + + forwarded@0.2.0: {} + + fresh@2.0.0: {} + + fs-constants@1.0.0: + optional: true + + fs-extra@11.3.4: + dependencies: + graceful-fs: 4.2.11 + jsonfile: 6.2.0 + universalify: 2.0.1 fsevents@2.3.3: - resolution: {integrity: sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==} - engines: {node: ^8.16.0 || ^10.6.0 || >=11.0.0} - os: [darwin] + optional: true - graphql@16.13.1: - resolution: {integrity: sha512-gGgrVCoDKlIZ8fIqXBBb0pPKqDgki0Z/FSKNiQzSGj2uEYHr1tq5wmBegGwJx6QB5S5cM0khSBpi/JFHMCvsmQ==} - engines: {node: ^12.22.0 || ^14.16.0 || ^16.0.0 || >=17.0.0} + function-bind@1.1.2: {} - js-tokens@9.0.1: - resolution: {integrity: sha512-mxa9E9ITFOt0ban3j6L5MpjwegGz6lBQmM1IJkWeBZGcMxto50+eWdjC/52xDbS2vy0k7vIMK0Fe2wfL9OQSpQ==} + fzf@0.5.2: {} - liquidjs@10.24.0: - resolution: {integrity: sha512-TAUNAdgwaAXjjcUFuYVJm9kOVH7zc0mTKxsG9t9Lu4qdWjB2BEblyVIYpjWcmJLMGgiYqnGNJjpNMHx0gp/46A==} - engines: {node: '>=16'} - hasBin: true + gaxios@6.7.1: + dependencies: + extend: 3.0.2 + https-proxy-agent: 7.0.6 + is-stream: 2.0.1 + node-fetch: 2.7.0 + uuid: 9.0.1 + transitivePeerDependencies: + - encoding + - supports-color - loupe@3.2.1: - resolution: {integrity: sha512-CdzqowRJCeLU72bHvWqwRBBlLcMEtIvGrlvef74kMnV2AolS9Y8xUv1I0U/MNAWMhBlKIoyuEgoJ0t/bbwHbLQ==} + gaxios@7.1.4: + dependencies: + extend: 3.0.2 + https-proxy-agent: 7.0.6 + node-fetch: 3.3.2 + transitivePeerDependencies: + - supports-color - magic-string@0.30.21: - resolution: {integrity: sha512-vd2F4YUyEXKGcLHoq+TEyCjxueSeHnFxyyjNp80yg0XV4vUhnDer/lvvlqM/arB5bXQN5K2/3oinyCRyx8T2CQ==} + gcp-metadata@6.1.1: + dependencies: + gaxios: 6.7.1 + google-logging-utils: 0.0.2 + json-bigint: 1.0.0 + transitivePeerDependencies: + - encoding + - supports-color - ms@2.1.3: - resolution: {integrity: 
sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==} + gcp-metadata@8.1.2: + dependencies: + gaxios: 7.1.4 + google-logging-utils: 1.1.3 + json-bigint: 1.0.0 + transitivePeerDependencies: + - supports-color - nanoid@3.3.11: - resolution: {integrity: sha512-N8SpfPUnUp1bK+PMYW8qSWdl9U+wwNWI4QKxOYDy9JAro3WMX7p2OeVRF9v+347pnakNevPmiHhNmZ2HbFA76w==} - engines: {node: ^10 || ^12 || ^13.7 || ^14 || >=15.0.1} - hasBin: true + get-caller-file@2.0.5: {} - pathe@2.0.3: - resolution: {integrity: sha512-WUjGcAqP1gQacoQe+OBJsFA7Ld4DyXuUIjZ5cc75cLHvJ7dtNsTugphxIADwspS+AraAUePCKrSVtPLFj/F88w==} + get-intrinsic@1.3.0: + dependencies: + call-bind-apply-helpers: 1.0.2 + es-define-property: 1.0.1 + es-errors: 1.3.0 + es-object-atoms: 1.1.1 + function-bind: 1.1.2 + get-proto: 1.0.1 + gopd: 1.2.0 + has-symbols: 1.1.0 + hasown: 2.0.2 + math-intrinsics: 1.1.0 + + get-proto@1.0.1: + dependencies: + dunder-proto: 1.0.1 + es-object-atoms: 1.1.1 - pathval@2.0.1: - resolution: {integrity: sha512-//nshmD55c46FuFw26xV/xFAaB5HF9Xdap7HJBBnrKdAd6/GxDBaNA1870O79+9ueg61cZLSVc+OaFlfmObYVQ==} - engines: {node: '>= 14.16'} + get-stream@5.2.0: + dependencies: + pump: 3.0.4 - picocolors@1.1.1: - resolution: {integrity: sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA==} + get-stream@9.0.1: + dependencies: + '@sec-ant/readable-stream': 0.4.1 + is-stream: 4.0.1 - picomatch@4.0.3: - resolution: {integrity: sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==} - engines: {node: '>=12'} + github-from-package@0.0.0: + optional: true - postcss@8.5.8: - resolution: {integrity: sha512-OW/rX8O/jXnm82Ey1k44pObPtdblfiuWnrd8X7GJ7emImCOstunGbXUpp7HdBrFQX6rJzn3sPT397Wp5aCwCHg==} - engines: {node: ^10 || ^12 || >=14} + glob@12.0.0: + dependencies: + foreground-child: 3.3.1 + jackspeak: 4.2.3 + minimatch: 10.2.4 + minipass: 7.1.3 + package-json-from-dist: 1.0.1 + path-scurry: 2.0.2 + + 
google-auth-library@10.6.2: + dependencies: + base64-js: 1.5.1 + ecdsa-sig-formatter: 1.0.11 + gaxios: 7.1.4 + gcp-metadata: 8.1.2 + google-logging-utils: 1.1.3 + jws: 4.0.1 + transitivePeerDependencies: + - supports-color - rollup@4.59.0: - resolution: {integrity: sha512-2oMpl67a3zCH9H79LeMcbDhXW/UmWG/y2zuqnF2jQq5uq9TbM9TVyXvA4+t+ne2IIkBdrLpAaRQAvo7YI/Yyeg==} - engines: {node: '>=18.0.0', npm: '>=8.0.0'} - hasBin: true + google-auth-library@9.15.1: + dependencies: + base64-js: 1.5.1 + ecdsa-sig-formatter: 1.0.11 + gaxios: 6.7.1 + gcp-metadata: 6.1.1 + gtoken: 7.1.0 + jws: 4.0.1 + transitivePeerDependencies: + - encoding + - supports-color - siginfo@2.0.0: - resolution: {integrity: sha512-ybx0WO1/8bSBLEWXZvEd7gMW3Sn3JFlW3TvX1nREbDLRNQNaeNN8WK0meBwPdAaOI7TtRRRJn/Es1zhrrCHu7g==} + google-gax@4.6.1: + dependencies: + '@grpc/grpc-js': 1.14.3 + '@grpc/proto-loader': 0.7.15 + '@types/long': 4.0.2 + abort-controller: 3.0.0 + duplexify: 4.1.3 + google-auth-library: 9.15.1 + node-fetch: 2.7.0 + object-hash: 3.0.0 + proto3-json-serializer: 2.0.2 + protobufjs: 7.5.4 + retry-request: 7.0.2 + uuid: 9.0.1 + transitivePeerDependencies: + - encoding + - supports-color - source-map-js@1.2.1: - resolution: {integrity: sha512-UXWMKhLOwVKb728IUtQPXxfYU+usdybtUrK/8uGE8CQMvrhOpwvzDBwj0QhSL7MQc7vIsISBG8VQ8+IDQxpfQA==} - engines: {node: '>=0.10.0'} + google-logging-utils@0.0.2: {} - stackback@0.0.2: - resolution: {integrity: sha512-1XMJE5fQo1jGH6Y/7ebnwPOBEkIEnT4QF32d5R1+VXdXveM0IBMJt8zfaxX1P3QhVwrYe+576+jkANtSS2mBbw==} + google-logging-utils@1.1.3: {} - std-env@3.10.0: - resolution: {integrity: sha512-5GS12FdOZNliM5mAOxFRg7Ir0pWz8MdpYm6AY6VPkGpbA7ZzmbzNcBJQ0GPvvyWgcY7QAhCgf9Uy89I03faLkg==} + googleapis-common@7.2.0: + dependencies: + extend: 3.0.2 + gaxios: 6.7.1 + google-auth-library: 9.15.1 + qs: 6.15.0 + url-template: 2.0.8 + uuid: 9.0.1 + transitivePeerDependencies: + - encoding + - supports-color - strip-literal@3.1.0: - resolution: {integrity: 
sha512-8r3mkIM/2+PpjHoOtiAW8Rg3jJLHaV7xPwG+YRGrv6FP0wwk/toTpATxWYOW0BKdWwl82VT2tFYi5DlROa0Mxg==} + googleapis@137.1.0: + dependencies: + google-auth-library: 9.15.1 + googleapis-common: 7.2.0 + transitivePeerDependencies: + - encoding + - supports-color - tinybench@2.9.0: - resolution: {integrity: sha512-0+DUvqWMValLmha6lr4kD8iAMK1HzV0/aKnCtWb9v9641TnP/MFb7Pc2bxoxQjTXAErryXVgUOfv2YqNllqGeg==} + gopd@1.2.0: {} - tinyexec@0.3.2: - resolution: {integrity: sha512-KQQR9yN7R5+OSwaK0XQoj22pwHoTlgYqmUscPYoknOoWCWfj/5/ABTMRi69FrKU5ffPVh5QcFikpWJI/P1ocHA==} + got@14.6.6: + dependencies: + '@sindresorhus/is': 7.2.0 + byte-counter: 0.1.0 + cacheable-lookup: 7.0.0 + cacheable-request: 13.0.18 + decompress-response: 10.0.0 + form-data-encoder: 4.1.0 + http2-wrapper: 2.2.1 + keyv: 5.6.0 + lowercase-keys: 3.0.0 + p-cancelable: 4.0.1 + responselike: 4.0.2 + type-fest: 4.41.0 + + graceful-fs@4.2.11: {} - tinyglobby@0.2.15: - resolution: {integrity: sha512-j2Zq4NyQYG5XMST4cbs02Ak8iJUdxRM0XI5QyxXuZOzKOINmWurp3smXu3y5wDcJrptwpSjgXHzIQxR0omXljQ==} - engines: {node: '>=12.0.0'} + graphql@16.13.1: {} - tinypool@1.1.1: - resolution: {integrity: sha512-Zba82s87IFq9A9XmjiX5uZA/ARWDrB03OHlq+Vw1fSdt0I+4/Kutwy8BP4Y/y/aORMo61FQ0vIb5j44vSo5Pkg==} - engines: {node: ^18.0.0 || >=20.0.0} + gtoken@7.1.0: + dependencies: + gaxios: 6.7.1 + jws: 4.0.1 + transitivePeerDependencies: + - encoding + - supports-color - tinyrainbow@2.0.0: - resolution: {integrity: sha512-op4nsTR47R6p0vMUUoYl/a+ljLFVtlfaXkLQmqfLR1qHma1h/ysYk4hEXZ880bf2CYgTskvTa/e196Vd5dDQXw==} - engines: {node: '>=14.0.0'} + has-symbols@1.1.0: {} + + has-tostringtag@1.0.2: + dependencies: + has-symbols: 1.1.0 + + hasown@2.0.2: + dependencies: + function-bind: 1.1.2 + + hono@4.12.8: {} + + hosted-git-info@7.0.2: + dependencies: + lru-cache: 10.4.3 + + html-entities@2.6.0: {} + + html-to-text@9.0.5: + dependencies: + '@selderee/plugin-htmlparser2': 0.11.0 + deepmerge: 4.3.1 + dom-serializer: 2.0.0 + htmlparser2: 8.0.2 + selderee: 0.11.0 + + 
htmlparser2@8.0.2: + dependencies: + domelementtype: 2.3.0 + domhandler: 5.0.3 + domutils: 3.2.2 + entities: 4.5.0 + + http-cache-semantics@4.2.0: {} + + http-errors@2.0.1: + dependencies: + depd: 2.0.0 + inherits: 2.0.4 + setprototypeof: 1.2.0 + statuses: 2.0.2 + toidentifier: 1.0.1 + + http-proxy-agent@5.0.0: + dependencies: + '@tootallnate/once': 2.0.0 + agent-base: 6.0.2 + debug: 4.4.3 + transitivePeerDependencies: + - supports-color + + http2-wrapper@2.2.1: + dependencies: + quick-lru: 5.1.1 + resolve-alpn: 1.2.1 + + https-proxy-agent@5.0.1: + dependencies: + agent-base: 6.0.2 + debug: 4.4.3 + transitivePeerDependencies: + - supports-color + + https-proxy-agent@7.0.6: + dependencies: + agent-base: 7.1.4 + debug: 4.4.3 + transitivePeerDependencies: + - supports-color + + human-signals@8.0.1: {} + + iconv-lite@0.7.2: + dependencies: + safer-buffer: 2.1.2 + + ieee754@1.2.1: + optional: true + + ignore@7.0.5: {} + + import-in-the-middle@1.15.0: + dependencies: + acorn: 8.16.0 + acorn-import-attributes: 1.9.5(acorn@8.16.0) + cjs-module-lexer: 1.4.3 + module-details-from-path: 1.0.4 + + import-in-the-middle@2.0.6: + dependencies: + acorn: 8.16.0 + acorn-import-attributes: 1.9.5(acorn@8.16.0) + cjs-module-lexer: 2.2.0 + module-details-from-path: 1.0.4 + + index-to-position@1.2.0: {} + + inherits@2.0.4: {} + + ini@1.3.8: + optional: true + + ip-address@10.1.0: {} + + ipaddr.js@1.9.1: {} + + is-core-module@2.16.1: + dependencies: + hasown: 2.0.2 + + is-docker@3.0.0: {} + + is-electron@2.2.2: {} + + is-fullwidth-code-point@3.0.0: {} + + is-inside-container@1.0.0: + dependencies: + is-docker: 3.0.0 + + is-obj@2.0.0: {} + + is-plain-obj@4.1.0: {} + + is-promise@4.0.0: {} + + is-stream@2.0.1: {} + + is-stream@4.0.1: {} + + is-unicode-supported@2.1.0: {} + + is-wsl@3.1.1: + dependencies: + is-inside-container: 1.0.0 + + isexe@2.0.0: {} + + jackspeak@4.2.3: + dependencies: + '@isaacs/cliui': 9.0.0 + + jose@6.2.1: {} + + js-tokens@4.0.0: {} + + js-tokens@9.0.1: {} + + 
js-yaml@4.1.1: + dependencies: + argparse: 2.0.1 + + json-bigint@1.0.0: + dependencies: + bignumber.js: 9.3.1 + + json-schema-traverse@1.0.0: {} + + json-schema-typed@8.0.2: {} + + json-schema@0.4.0: {} + + jsonfile@6.2.0: + dependencies: + universalify: 2.0.1 + optionalDependencies: + graceful-fs: 4.2.11 + + jsonwebtoken@9.0.3: + dependencies: + jws: 4.0.1 + lodash.includes: 4.3.0 + lodash.isboolean: 3.0.3 + lodash.isinteger: 4.0.4 + lodash.isnumber: 3.0.3 + lodash.isplainobject: 4.0.6 + lodash.isstring: 4.0.1 + lodash.once: 4.1.1 + ms: 2.1.3 + semver: 7.7.4 + + jwa@2.0.1: + dependencies: + buffer-equal-constant-time: 1.0.1 + ecdsa-sig-formatter: 1.0.11 + safe-buffer: 5.2.1 + + jws@4.0.1: + dependencies: + jwa: 2.0.1 + safe-buffer: 5.2.1 + + keytar@7.9.0: + dependencies: + node-addon-api: 4.3.0 + prebuild-install: 7.1.3 + optional: true - tinyspy@4.0.4: - resolution: {integrity: sha512-azl+t0z7pw/z958Gy9svOTuzqIk6xq+NSheJzn5MMWtWTFywIacg2wUlzKFGtt3cthx0r2SxMK0yzJOR0IES7Q==} - engines: {node: '>=14.0.0'} + keyv@5.6.0: + dependencies: + '@keyv/serialize': 1.1.1 - typescript@5.9.3: - resolution: {integrity: sha512-jl1vZzPDinLr9eUt3J/t7V6FgNEw9QjvBPdysz9KfQDD41fQrC2Y4vKQdiaUpFT4bXlb1RHhLpp8wtm6M5TgSw==} - engines: {node: '>=14.17'} - hasBin: true + leac@0.6.0: {} - undici-types@6.21.0: - resolution: {integrity: sha512-iwDZqg0QAGrg9Rav5H4n0M64c3mkR59cJ6wQp+7C4nI0gsmExaedaYLNO44eT4AtBBwjbTiGPMlt2Md0T9H9JQ==} + liquidjs@10.24.0: + dependencies: + commander: 10.0.1 - vite-node@3.2.4: - resolution: {integrity: sha512-EbKSKh+bh1E1IFxeO0pg1n4dvoOTt0UDiXMd/qn++r98+jPO1xtJilvXldeuQ8giIB5IkpjCgMleHMNEsGH6pg==} - engines: {node: ^18.0.0 || ^20.0.0 || >=22.0.0} - hasBin: true + lodash.camelcase@4.3.0: {} - vite@7.3.1: - resolution: {integrity: sha512-w+N7Hifpc3gRjZ63vYBXA56dvvRlNWRczTdmCBBa+CotUzAPf5b7YMdMR/8CQoeYE5LX3W4wj6RYTgonm1b9DA==} - engines: {node: ^20.19.0 || >=22.12.0} - hasBin: true - peerDependencies: - '@types/node': ^20.19.0 || >=22.12.0 - jiti: '>=1.21.0' - less: 
^4.0.0 - lightningcss: ^1.21.0 - sass: ^1.70.0 - sass-embedded: ^1.70.0 - stylus: '>=0.54.8' - sugarss: ^5.0.0 - terser: ^5.16.0 - tsx: ^4.8.1 - yaml: ^2.4.2 - peerDependenciesMeta: - '@types/node': - optional: true - jiti: - optional: true - less: - optional: true - lightningcss: - optional: true - sass: - optional: true - sass-embedded: - optional: true - stylus: - optional: true - sugarss: - optional: true - terser: - optional: true - tsx: - optional: true - yaml: - optional: true + lodash.includes@4.3.0: {} - vitest@3.2.4: - resolution: {integrity: sha512-LUCP5ev3GURDysTWiP47wRRUpLKMOfPh+yKTx3kVIEiu5KOMeqzpnYNsKyOoVrULivR8tLcks4+lga33Whn90A==} - engines: {node: ^18.0.0 || ^20.0.0 || >=22.0.0} - hasBin: true - peerDependencies: - '@edge-runtime/vm': '*' - '@types/debug': ^4.1.12 - '@types/node': ^18.0.0 || ^20.0.0 || >=22.0.0 - '@vitest/browser': 3.2.4 - '@vitest/ui': 3.2.4 - happy-dom: '*' - jsdom: '*' - peerDependenciesMeta: - '@edge-runtime/vm': - optional: true - '@types/debug': - optional: true - '@types/node': - optional: true - '@vitest/browser': - optional: true - '@vitest/ui': - optional: true - happy-dom: - optional: true - jsdom: - optional: true + lodash.isboolean@3.0.3: {} - why-is-node-running@2.3.0: - resolution: {integrity: sha512-hUrmaWBdVDcxvYqnyh09zunKzROWjbZTiNy8dBEjkS7ehEDQibXJ7XvlmtbwuTclUiIyN+CyXQD4Vmko8fNm8w==} - engines: {node: '>=8'} - hasBin: true + lodash.isinteger@4.0.4: {} - yaml@2.8.2: - resolution: {integrity: sha512-mplynKqc1C2hTVYxd0PU2xQAc22TI1vShAYGksCCfxbn/dFwnHTNi1bvYsBTkhdUNtGIf5xNOg938rrSSYvS9A==} - engines: {node: '>= 14.6'} - hasBin: true + lodash.isnumber@3.0.3: {} - zod@4.3.6: - resolution: {integrity: sha512-rftlrkhHZOcjDwkGlnUtZZkvaPHCsDATp4pGpuOOMDaTdDDXF91wuVDJoWoPsKX/3YPQ5fHuF3STjcYyKr+Qhg==} + lodash.isplainobject@4.0.6: {} -snapshots: + lodash.isstring@4.0.1: {} - '@biomejs/biome@1.9.4': - optionalDependencies: - '@biomejs/cli-darwin-arm64': 1.9.4 - '@biomejs/cli-darwin-x64': 1.9.4 - '@biomejs/cli-linux-arm64': 
1.9.4 - '@biomejs/cli-linux-arm64-musl': 1.9.4 - '@biomejs/cli-linux-x64': 1.9.4 - '@biomejs/cli-linux-x64-musl': 1.9.4 - '@biomejs/cli-win32-arm64': 1.9.4 - '@biomejs/cli-win32-x64': 1.9.4 + lodash.once@4.1.1: {} - '@biomejs/cli-darwin-arm64@1.9.4': - optional: true + long@5.3.2: {} - '@biomejs/cli-darwin-x64@1.9.4': - optional: true + loupe@3.2.1: {} - '@biomejs/cli-linux-arm64-musl@1.9.4': - optional: true + lowercase-keys@3.0.0: {} - '@biomejs/cli-linux-arm64@1.9.4': - optional: true + lru-cache@10.4.3: {} - '@biomejs/cli-linux-x64-musl@1.9.4': - optional: true + lru-cache@11.2.7: {} - '@biomejs/cli-linux-x64@1.9.4': - optional: true + magic-string@0.30.21: + dependencies: + '@jridgewell/sourcemap-codec': 1.5.5 - '@biomejs/cli-win32-arm64@1.9.4': - optional: true + marked@15.0.12: {} - '@biomejs/cli-win32-x64@1.9.4': - optional: true + math-intrinsics@1.1.0: {} - '@esbuild/aix-ppc64@0.27.3': - optional: true + media-typer@1.1.0: {} - '@esbuild/android-arm64@0.27.3': - optional: true + merge-descriptors@2.0.0: {} - '@esbuild/android-arm@0.27.3': - optional: true + mime-db@1.52.0: {} - '@esbuild/android-x64@0.27.3': - optional: true + mime-db@1.54.0: {} - '@esbuild/darwin-arm64@0.27.3': - optional: true + mime-types@2.1.35: + dependencies: + mime-db: 1.52.0 - '@esbuild/darwin-x64@0.27.3': - optional: true + mime-types@3.0.2: + dependencies: + mime-db: 1.54.0 - '@esbuild/freebsd-arm64@0.27.3': - optional: true + mime@4.0.7: {} - '@esbuild/freebsd-x64@0.27.3': + mimic-response@3.1.0: optional: true - '@esbuild/linux-arm64@0.27.3': - optional: true + mimic-response@4.0.0: {} - '@esbuild/linux-arm@0.27.3': - optional: true + minimatch@10.2.4: + dependencies: + brace-expansion: 5.0.4 - '@esbuild/linux-ia32@0.27.3': + minimist@1.2.8: optional: true - '@esbuild/linux-loong64@0.27.3': - optional: true + minipass@7.1.3: {} - '@esbuild/linux-mips64el@0.27.3': + mkdirp-classic@0.5.3: optional: true - '@esbuild/linux-ppc64@0.27.3': - optional: true + mnemonist@0.40.3: + 
dependencies: + obliterator: 2.0.5 - '@esbuild/linux-riscv64@0.27.3': - optional: true + module-details-from-path@1.0.4: {} - '@esbuild/linux-s390x@0.27.3': - optional: true + ms@2.1.3: {} - '@esbuild/linux-x64@0.27.3': - optional: true + nanoid@3.3.11: {} - '@esbuild/netbsd-arm64@0.27.3': + napi-build-utils@2.0.0: optional: true - '@esbuild/netbsd-x64@0.27.3': - optional: true + negotiator@1.0.0: {} - '@esbuild/openbsd-arm64@0.27.3': + node-abi@3.89.0: + dependencies: + semver: 7.7.4 optional: true - '@esbuild/openbsd-x64@0.27.3': + node-addon-api@4.3.0: optional: true - '@esbuild/openharmony-arm64@0.27.3': + node-addon-api@7.1.1: optional: true - '@esbuild/sunos-x64@0.27.3': - optional: true + node-addon-api@8.6.0: {} - '@esbuild/win32-arm64@0.27.3': - optional: true + node-domexception@1.0.0: {} - '@esbuild/win32-ia32@0.27.3': - optional: true + node-fetch@2.7.0: + dependencies: + whatwg-url: 5.0.0 - '@esbuild/win32-x64@0.27.3': - optional: true + node-fetch@3.3.2: + dependencies: + data-uri-to-buffer: 4.0.1 + fetch-blob: 3.2.0 + formdata-polyfill: 4.0.10 - '@jridgewell/sourcemap-codec@1.5.5': {} + node-gyp-build@4.8.4: {} - '@rollup/rollup-android-arm-eabi@4.59.0': + node-pty@1.1.0: + dependencies: + node-addon-api: 7.1.1 optional: true - '@rollup/rollup-android-arm64@4.59.0': - optional: true + normalize-package-data@6.0.2: + dependencies: + hosted-git-info: 7.0.2 + semver: 7.7.4 + validate-npm-package-license: 3.0.4 - '@rollup/rollup-darwin-arm64@4.59.0': - optional: true + normalize-url@8.1.1: {} - '@rollup/rollup-darwin-x64@4.59.0': - optional: true + npm-run-path@6.0.0: + dependencies: + path-key: 4.0.0 + unicorn-magic: 0.3.0 - '@rollup/rollup-freebsd-arm64@4.59.0': - optional: true + object-assign@4.1.1: {} - '@rollup/rollup-freebsd-x64@4.59.0': - optional: true + object-hash@3.0.0: {} - '@rollup/rollup-linux-arm-gnueabihf@4.59.0': - optional: true + object-inspect@1.13.4: {} - '@rollup/rollup-linux-arm-musleabihf@4.59.0': - optional: true + 
obliterator@2.0.5: {} - '@rollup/rollup-linux-arm64-gnu@4.59.0': - optional: true + on-finished@2.4.1: + dependencies: + ee-first: 1.1.1 - '@rollup/rollup-linux-arm64-musl@4.59.0': - optional: true + once@1.4.0: + dependencies: + wrappy: 1.0.2 - '@rollup/rollup-linux-loong64-gnu@4.59.0': - optional: true + open@10.2.0: + dependencies: + default-browser: 5.5.0 + define-lazy-prop: 3.0.0 + is-inside-container: 1.0.0 + wsl-utils: 0.1.0 - '@rollup/rollup-linux-loong64-musl@4.59.0': - optional: true + p-cancelable@4.0.1: {} - '@rollup/rollup-linux-ppc64-gnu@4.59.0': - optional: true + p-finally@1.0.0: {} - '@rollup/rollup-linux-ppc64-musl@4.59.0': - optional: true + p-queue@6.6.2: + dependencies: + eventemitter3: 4.0.7 + p-timeout: 3.2.0 - '@rollup/rollup-linux-riscv64-gnu@4.59.0': - optional: true + p-retry@4.6.2: + dependencies: + '@types/retry': 0.12.0 + retry: 0.13.1 - '@rollup/rollup-linux-riscv64-musl@4.59.0': - optional: true + p-timeout@3.2.0: + dependencies: + p-finally: 1.0.0 - '@rollup/rollup-linux-s390x-gnu@4.59.0': - optional: true + package-json-from-dist@1.0.1: {} - '@rollup/rollup-linux-x64-gnu@4.59.0': - optional: true + parse-json@8.3.0: + dependencies: + '@babel/code-frame': 7.29.0 + index-to-position: 1.2.0 + type-fest: 4.41.0 - '@rollup/rollup-linux-x64-musl@4.59.0': - optional: true + parse-ms@4.0.0: {} - '@rollup/rollup-openbsd-x64@4.59.0': - optional: true + parseley@0.12.1: + dependencies: + leac: 0.6.0 + peberminta: 0.9.0 - '@rollup/rollup-openharmony-arm64@4.59.0': - optional: true + parseurl@1.3.3: {} - '@rollup/rollup-win32-arm64-msvc@4.59.0': - optional: true + path-exists@5.0.0: {} - '@rollup/rollup-win32-ia32-msvc@4.59.0': - optional: true + path-key@3.1.1: {} - '@rollup/rollup-win32-x64-gnu@4.59.0': - optional: true + path-key@4.0.0: {} - '@rollup/rollup-win32-x64-msvc@4.59.0': - optional: true + path-parse@1.0.7: {} - '@types/chai@5.2.3': + path-scurry@2.0.2: dependencies: - '@types/deep-eql': 4.0.2 - assertion-error: 2.0.1 + lru-cache: 
11.2.7 + minipass: 7.1.3 - '@types/deep-eql@4.0.2': {} + path-to-regexp@8.3.0: {} - '@types/estree@1.0.8': {} + pathe@2.0.3: {} - '@types/node@22.19.15': - dependencies: - undici-types: 6.21.0 + pathval@2.0.1: {} + + peberminta@0.9.0: {} + + pend@1.2.0: {} + + picocolors@1.1.1: {} + + picomatch@4.0.3: {} - '@vitest/expect@3.2.4': - dependencies: - '@types/chai': 5.2.3 - '@vitest/spy': 3.2.4 - '@vitest/utils': 3.2.4 - chai: 5.3.3 - tinyrainbow: 2.0.0 + pkce-challenge@5.0.1: {} - '@vitest/mocker@3.2.4(vite@7.3.1(@types/node@22.19.15)(yaml@2.8.2))': + postcss@8.5.8: dependencies: - '@vitest/spy': 3.2.4 - estree-walker: 3.0.3 - magic-string: 0.30.21 - optionalDependencies: - vite: 7.3.1(@types/node@22.19.15)(yaml@2.8.2) + nanoid: 3.3.11 + picocolors: 1.1.1 + source-map-js: 1.2.1 - '@vitest/pretty-format@3.2.4': + prebuild-install@7.1.3: dependencies: - tinyrainbow: 2.0.0 + detect-libc: 2.1.2 + expand-template: 2.0.3 + github-from-package: 0.0.0 + minimist: 1.2.8 + mkdirp-classic: 0.5.3 + napi-build-utils: 2.0.0 + node-abi: 3.89.0 + pump: 3.0.4 + rc: 1.2.8 + simple-get: 4.0.1 + tar-fs: 2.1.4 + tunnel-agent: 0.6.0 + optional: true - '@vitest/runner@3.2.4': + pretty-ms@9.3.0: dependencies: - '@vitest/utils': 3.2.4 - pathe: 2.0.3 - strip-literal: 3.1.0 + parse-ms: 4.0.0 - '@vitest/snapshot@3.2.4': + proper-lockfile@4.1.2: dependencies: - '@vitest/pretty-format': 3.2.4 - magic-string: 0.30.21 - pathe: 2.0.3 + graceful-fs: 4.2.11 + retry: 0.12.0 + signal-exit: 3.0.7 - '@vitest/spy@3.2.4': + proto3-json-serializer@2.0.2: dependencies: - tinyspy: 4.0.4 + protobufjs: 7.5.4 - '@vitest/utils@3.2.4': + protobufjs@7.5.4: dependencies: - '@vitest/pretty-format': 3.2.4 - loupe: 3.2.1 - tinyrainbow: 2.0.0 - - assertion-error@2.0.1: {} + '@protobufjs/aspromise': 1.1.2 + '@protobufjs/base64': 1.1.2 + '@protobufjs/codegen': 2.0.4 + '@protobufjs/eventemitter': 1.1.0 + '@protobufjs/fetch': 1.1.0 + '@protobufjs/float': 1.0.2 + '@protobufjs/inquire': 1.1.0 + '@protobufjs/path': 1.1.2 + 
'@protobufjs/pool': 1.1.0 + '@protobufjs/utf8': 1.1.0 + '@types/node': 22.19.15 + long: 5.3.2 - cac@6.7.14: {} + protobufjs@8.0.0: + dependencies: + '@protobufjs/aspromise': 1.1.2 + '@protobufjs/base64': 1.1.2 + '@protobufjs/codegen': 2.0.4 + '@protobufjs/eventemitter': 1.1.0 + '@protobufjs/fetch': 1.1.0 + '@protobufjs/float': 1.0.2 + '@protobufjs/inquire': 1.1.0 + '@protobufjs/path': 1.1.2 + '@protobufjs/pool': 1.1.0 + '@protobufjs/utf8': 1.1.0 + '@types/node': 22.19.15 + long: 5.3.2 - chai@5.3.3: + proxy-addr@2.0.7: dependencies: - assertion-error: 2.0.1 - check-error: 2.1.3 - deep-eql: 5.0.2 - loupe: 3.2.1 - pathval: 2.0.1 + forwarded: 0.2.0 + ipaddr.js: 1.9.1 - check-error@2.1.3: {} + proxy-from-env@1.1.0: {} - commander@10.0.1: {} + pump@3.0.4: + dependencies: + end-of-stream: 1.4.5 + once: 1.4.0 - debug@4.4.3: + pumpify@2.0.1: dependencies: - ms: 2.1.3 + duplexify: 4.1.3 + inherits: 2.0.4 + pump: 3.0.4 - deep-eql@5.0.2: {} + qs@6.15.0: + dependencies: + side-channel: 1.1.0 - es-module-lexer@1.7.0: {} + quick-lru@5.1.1: {} - esbuild@0.27.3: - optionalDependencies: - '@esbuild/aix-ppc64': 0.27.3 - '@esbuild/android-arm': 0.27.3 - '@esbuild/android-arm64': 0.27.3 - '@esbuild/android-x64': 0.27.3 - '@esbuild/darwin-arm64': 0.27.3 - '@esbuild/darwin-x64': 0.27.3 - '@esbuild/freebsd-arm64': 0.27.3 - '@esbuild/freebsd-x64': 0.27.3 - '@esbuild/linux-arm': 0.27.3 - '@esbuild/linux-arm64': 0.27.3 - '@esbuild/linux-ia32': 0.27.3 - '@esbuild/linux-loong64': 0.27.3 - '@esbuild/linux-mips64el': 0.27.3 - '@esbuild/linux-ppc64': 0.27.3 - '@esbuild/linux-riscv64': 0.27.3 - '@esbuild/linux-s390x': 0.27.3 - '@esbuild/linux-x64': 0.27.3 - '@esbuild/netbsd-arm64': 0.27.3 - '@esbuild/netbsd-x64': 0.27.3 - '@esbuild/openbsd-arm64': 0.27.3 - '@esbuild/openbsd-x64': 0.27.3 - '@esbuild/openharmony-arm64': 0.27.3 - '@esbuild/sunos-x64': 0.27.3 - '@esbuild/win32-arm64': 0.27.3 - '@esbuild/win32-ia32': 0.27.3 - '@esbuild/win32-x64': 0.27.3 + range-parser@1.2.1: {} - estree-walker@3.0.3: 
+ raw-body@3.0.2: dependencies: - '@types/estree': 1.0.8 - - expect-type@1.3.0: {} - - fdir@6.5.0(picomatch@4.0.3): - optionalDependencies: - picomatch: 4.0.3 + bytes: 3.1.2 + http-errors: 2.0.1 + iconv-lite: 0.7.2 + unpipe: 1.0.0 - fsevents@2.3.3: + rc@1.2.8: + dependencies: + deep-extend: 0.6.0 + ini: 1.3.8 + minimist: 1.2.8 + strip-json-comments: 2.0.1 optional: true - graphql@16.13.1: {} + read-package-up@11.0.0: + dependencies: + find-up-simple: 1.0.1 + read-pkg: 9.0.1 + type-fest: 4.41.0 - js-tokens@9.0.1: {} + read-pkg@9.0.1: + dependencies: + '@types/normalize-package-data': 2.4.4 + normalize-package-data: 6.0.2 + parse-json: 8.3.0 + type-fest: 4.41.0 + unicorn-magic: 0.1.0 - liquidjs@10.24.0: + readable-stream@3.6.2: dependencies: - commander: 10.0.1 + inherits: 2.0.4 + string_decoder: 1.3.0 + util-deprecate: 1.0.2 - loupe@3.2.1: {} + require-directory@2.1.1: {} - magic-string@0.30.21: + require-from-string@2.0.2: {} + + require-in-the-middle@7.5.2: dependencies: - '@jridgewell/sourcemap-codec': 1.5.5 + debug: 4.4.3 + module-details-from-path: 1.0.4 + resolve: 1.22.11 + transitivePeerDependencies: + - supports-color - ms@2.1.3: {} + require-in-the-middle@8.0.1: + dependencies: + debug: 4.4.3 + module-details-from-path: 1.0.4 + transitivePeerDependencies: + - supports-color - nanoid@3.3.11: {} + resolve-alpn@1.2.1: {} - pathe@2.0.3: {} + resolve@1.22.11: + dependencies: + is-core-module: 2.16.1 + path-parse: 1.0.7 + supports-preserve-symlinks-flag: 1.0.0 - pathval@2.0.1: {} + responselike@4.0.2: + dependencies: + lowercase-keys: 3.0.0 - picocolors@1.1.1: {} + retry-request@7.0.2: + dependencies: + '@types/request': 2.48.13 + extend: 3.0.2 + teeny-request: 9.0.0 + transitivePeerDependencies: + - encoding + - supports-color - picomatch@4.0.3: {} + retry@0.12.0: {} - postcss@8.5.8: - dependencies: - nanoid: 3.3.11 - picocolors: 1.1.1 - source-map-js: 1.2.1 + retry@0.13.1: {} rollup@4.59.0: dependencies: @@ -1050,18 +5740,206 @@ snapshots: 
'@rollup/rollup-win32-x64-msvc': 4.59.0 fsevents: 2.3.3 + router@2.2.0: + dependencies: + debug: 4.4.3 + depd: 2.0.0 + is-promise: 4.0.0 + parseurl: 1.3.3 + path-to-regexp: 8.3.0 + transitivePeerDependencies: + - supports-color + + run-applescript@7.1.0: {} + + safe-buffer@5.2.1: {} + + safer-buffer@2.1.2: {} + + selderee@0.11.0: + dependencies: + parseley: 0.12.1 + + semver@7.7.4: {} + + send@1.2.1: + dependencies: + debug: 4.4.3 + encodeurl: 2.0.0 + escape-html: 1.0.3 + etag: 1.8.1 + fresh: 2.0.0 + http-errors: 2.0.1 + mime-types: 3.0.2 + ms: 2.1.3 + on-finished: 2.4.1 + range-parser: 1.2.1 + statuses: 2.0.2 + transitivePeerDependencies: + - supports-color + + serve-static@2.2.1: + dependencies: + encodeurl: 2.0.0 + escape-html: 1.0.3 + parseurl: 1.3.3 + send: 1.2.1 + transitivePeerDependencies: + - supports-color + + setprototypeof@1.2.0: {} + + shebang-command@2.0.0: + dependencies: + shebang-regex: 3.0.0 + + shebang-regex@3.0.0: {} + + shell-quote@1.8.3: {} + + side-channel-list@1.0.0: + dependencies: + es-errors: 1.3.0 + object-inspect: 1.13.4 + + side-channel-map@1.0.1: + dependencies: + call-bound: 1.0.4 + es-errors: 1.3.0 + get-intrinsic: 1.3.0 + object-inspect: 1.13.4 + + side-channel-weakmap@1.0.2: + dependencies: + call-bound: 1.0.4 + es-errors: 1.3.0 + get-intrinsic: 1.3.0 + object-inspect: 1.13.4 + side-channel-map: 1.0.1 + + side-channel@1.1.0: + dependencies: + es-errors: 1.3.0 + object-inspect: 1.13.4 + side-channel-list: 1.0.0 + side-channel-map: 1.0.1 + side-channel-weakmap: 1.0.2 + siginfo@2.0.0: {} + signal-exit@3.0.7: {} + + signal-exit@4.1.0: {} + + simple-concat@1.0.1: + optional: true + + simple-get@4.0.1: + dependencies: + decompress-response: 6.0.0 + once: 1.4.0 + simple-concat: 1.0.1 + optional: true + + simple-git@3.33.0: + dependencies: + '@kwsites/file-exists': 1.1.1 + '@kwsites/promise-deferred': 1.1.1 + debug: 4.4.3 + transitivePeerDependencies: + - supports-color + source-map-js@1.2.1: {} + spdx-correct@3.2.0: + dependencies: + 
spdx-expression-parse: 3.0.1 + spdx-license-ids: 3.0.23 + + spdx-exceptions@2.5.0: {} + + spdx-expression-parse@3.0.1: + dependencies: + spdx-exceptions: 2.5.0 + spdx-license-ids: 3.0.23 + + spdx-license-ids@3.0.23: {} + stackback@0.0.2: {} + statuses@2.0.2: {} + std-env@3.10.0: {} + stream-events@1.0.5: + dependencies: + stubs: 3.0.0 + + stream-shift@1.0.3: {} + + string-width@4.2.3: + dependencies: + emoji-regex: 8.0.0 + is-fullwidth-code-point: 3.0.0 + strip-ansi: 6.0.1 + + string_decoder@1.3.0: + dependencies: + safe-buffer: 5.2.1 + + strip-ansi@6.0.1: + dependencies: + ansi-regex: 5.0.1 + + strip-ansi@7.2.0: + dependencies: + ansi-regex: 6.2.2 + + strip-final-newline@4.0.0: {} + + strip-json-comments@2.0.1: + optional: true + + strip-json-comments@3.1.1: {} + strip-literal@3.1.0: dependencies: js-tokens: 9.0.1 + stubs@3.0.0: {} + + supports-preserve-symlinks-flag@1.0.0: {} + + systeminformation@5.31.4: {} + + tar-fs@2.1.4: + dependencies: + chownr: 1.1.4 + mkdirp-classic: 0.5.3 + pump: 3.0.4 + tar-stream: 2.2.0 + optional: true + + tar-stream@2.2.0: + dependencies: + bl: 4.1.0 + end-of-stream: 1.4.5 + fs-constants: 1.0.0 + inherits: 2.0.4 + readable-stream: 3.6.2 + optional: true + + teeny-request@9.0.0: + dependencies: + http-proxy-agent: 5.0.0 + https-proxy-agent: 5.0.1 + node-fetch: 2.7.0 + stream-events: 1.0.5 + uuid: 9.0.1 + transitivePeerDependencies: + - encoding + - supports-color + tinybench@2.9.0: {} tinyexec@0.3.2: {} @@ -1077,10 +5955,63 @@ snapshots: tinyspy@4.0.4: {} + toidentifier@1.0.1: {} + + tr46@0.0.3: {} + + tree-sitter-bash@0.25.1: + dependencies: + node-addon-api: 8.6.0 + node-gyp-build: 4.8.4 + + tsscmp@1.0.6: {} + + tunnel-agent@0.6.0: + dependencies: + safe-buffer: 5.2.1 + optional: true + + type-fest@4.41.0: {} + + type-is@2.0.1: + dependencies: + content-type: 1.0.5 + media-typer: 1.1.0 + mime-types: 3.0.2 + typescript@5.9.3: {} undici-types@6.21.0: {} + undici@7.24.4: {} + + unicorn-magic@0.1.0: {} + + unicorn-magic@0.3.0: {} + + 
universalify@2.0.1: {} + + unpipe@1.0.0: {} + + url-template@2.0.8: {} + + util-deprecate@1.0.2: {} + + uuid@11.1.0: {} + + uuid@13.0.0: {} + + uuid@8.3.2: {} + + uuid@9.0.1: {} + + validate-npm-package-license@3.0.4: + dependencies: + spdx-correct: 3.2.0 + spdx-expression-parse: 3.0.1 + + vary@1.1.2: {} + vite-node@3.2.4(@types/node@22.19.15)(yaml@2.8.2): dependencies: cac: 6.7.14 @@ -1115,7 +6046,7 @@ snapshots: fsevents: 2.3.3 yaml: 2.8.2 - vitest@3.2.4(@types/node@22.19.15)(yaml@2.8.2): + vitest@3.2.4(@types/debug@4.1.13)(@types/node@22.19.15)(yaml@2.8.2): dependencies: '@types/chai': 5.2.3 '@vitest/expect': 3.2.4 @@ -1141,6 +6072,7 @@ snapshots: vite-node: 3.2.4(@types/node@22.19.15)(yaml@2.8.2) why-is-node-running: 2.3.0 optionalDependencies: + '@types/debug': 4.1.13 '@types/node': 22.19.15 transitivePeerDependencies: - jiti @@ -1156,11 +6088,73 @@ snapshots: - tsx - yaml + web-streams-polyfill@3.3.3: {} + + web-tree-sitter@0.25.10: {} + + webidl-conversions@3.0.1: {} + + whatwg-url@5.0.0: + dependencies: + tr46: 0.0.3 + webidl-conversions: 3.0.1 + + which@2.0.2: + dependencies: + isexe: 2.0.0 + why-is-node-running@2.3.0: dependencies: siginfo: 2.0.0 stackback: 0.0.2 + wrap-ansi@7.0.0: + dependencies: + ansi-styles: 4.3.0 + string-width: 4.2.3 + strip-ansi: 6.0.1 + + wrappy@1.0.2: {} + + ws@8.19.0: {} + + wsl-utils@0.1.0: + dependencies: + is-wsl: 3.1.1 + + xdg-basedir@5.1.0: {} + + y18n@5.0.8: {} + yaml@2.8.2: {} + yargs-parser@21.1.1: {} + + yargs@17.7.2: + dependencies: + cliui: 8.0.1 + escalade: 3.2.0 + get-caller-file: 2.0.5 + require-directory: 2.1.1 + string-width: 4.2.3 + y18n: 5.0.8 + yargs-parser: 21.1.1 + + yauzl@2.10.0: + dependencies: + buffer-crc32: 0.2.13 + fd-slicer: 1.1.0 + + yoctocolors@2.1.2: {} + + zod-to-json-schema@3.25.0(zod@4.3.6): + dependencies: + zod: 4.3.6 + + zod-to-json-schema@3.25.1(zod@3.25.76): + dependencies: + zod: 3.25.76 + + zod@3.25.76: {} + zod@4.3.6: {} diff --git a/run-pipeline.sh b/run-pipeline.sh new file mode 100755 
index 00000000..0fdf5b66 --- /dev/null +++ b/run-pipeline.sh @@ -0,0 +1,186 @@ +#!/usr/bin/env bash +set -euo pipefail + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" + +# Save caller's REPO_URL before sourcing .env +_CALLER_REPO_URL="${REPO_URL:-}" + +# Source .env for LINEAR_API_KEY etc. +if [[ -f "$SCRIPT_DIR/.env" ]]; then + set -a + # shellcheck disable=SC1091 + source "$SCRIPT_DIR/.env" + set +a +fi + +# REPO_URL priority: caller env > script lookup table (not .env) +# .env may set REPO_URL for other tools, but this script uses its own product mapping +if [[ -n "$_CALLER_REPO_URL" ]]; then + REPO_URL="$_CALLER_REPO_URL" +else + unset REPO_URL +fi +unset _CALLER_REPO_URL + +usage() { + cat <<'EOF' +Usage: ./run-pipeline.sh [additional-args...] + +Launch the symphony-ts pipeline for a product. + +Products: + symphony Symphony orchestrator (github.com/mobilyze-llc/symphony-ts) + jony-agent Jony Agent + hs-data Household Services Data + hs-ui Household Services UI + hs-mobile Household Services Mobile + stickerlabs Stickerlabs Factory (github.com/mobilyze-llc/stickerlabs-factory) + household Household + toys TOYS (github.com/mobilyze-llc/pipeline-test-1) + +Options: + -h, --help Show this help message + --auto-build Automatically run 'npm run build' if dist is stale + --skip-build-check Skip the dist staleness check entirely + +Environment: + REPO_URL Override the default repo URL for the product + Example: REPO_URL=https://github.com/org/repo.git ./run-pipeline.sh symphony + +EOF + exit 0 +} + +# Show help if no args or help flag +if [[ $# -eq 0 ]] || [[ "$1" == "--help" ]] || [[ "$1" == "-h" ]]; then + usage +fi + +if [[ "$1" == "--version" ]] || [[ "$1" == "-V" ]]; then + exec node "$SCRIPT_DIR/dist/src/cli/main.js" --version +fi + +PRODUCT="$1" +shift + +# Parse flags before passing remaining args to symphony +AUTO_BUILD=false +SKIP_BUILD_CHECK=false +PASSTHROUGH_ARGS=() +for arg in "$@"; do + case "$arg" in + --auto-build) AUTO_BUILD=true ;; + 
--skip-build-check) SKIP_BUILD_CHECK=true ;; + *) PASSTHROUGH_ARGS+=("$arg") ;; + esac +done +set -- "${PASSTHROUGH_ARGS[@]+"${PASSTHROUGH_ARGS[@]}"}" + +# Map product → workflow file and default repo URL +case "$PRODUCT" in + symphony) + WORKFLOW="pipeline-config/workflows/WORKFLOW-symphony.md" + DEFAULT_REPO_URL="https://github.com/mobilyze-llc/symphony-ts.git" + ;; + jony-agent) + WORKFLOW="pipeline-config/workflows/WORKFLOW-jony-agent.md" + DEFAULT_REPO_URL="TBD" + ;; + hs-data) + WORKFLOW="pipeline-config/workflows/WORKFLOW-hs-data.md" + DEFAULT_REPO_URL="TBD" + ;; + hs-ui) + WORKFLOW="pipeline-config/workflows/WORKFLOW-hs-ui.md" + DEFAULT_REPO_URL="TBD" + ;; + hs-mobile) + WORKFLOW="pipeline-config/workflows/WORKFLOW-hs-mobile.md" + DEFAULT_REPO_URL="TBD" + ;; + stickerlabs) + WORKFLOW="pipeline-config/workflows/WORKFLOW-stickerlabs.md" + DEFAULT_REPO_URL="https://github.com/mobilyze-llc/stickerlabs-factory.git" + ;; + household) + WORKFLOW="pipeline-config/workflows/WORKFLOW-household.md" + DEFAULT_REPO_URL="TBD" + ;; + toys) + WORKFLOW="pipeline-config/workflows/WORKFLOW-toys.md" + DEFAULT_REPO_URL="https://github.com/mobilyze-llc/pipeline-test-1.git" + ;; + *) + echo "Error: Unknown product '$PRODUCT'" + echo "" + echo "Available products: symphony, jony-agent, hs-data, hs-ui, hs-mobile, stickerlabs, household, toys" + echo "Run './run-pipeline.sh --help' for details." + exit 1 + ;; +esac + +# Use env override if set, otherwise use default +REPO_URL="${REPO_URL:-$DEFAULT_REPO_URL}" + +# For TBD products, require explicit REPO_URL +if [[ "$REPO_URL" == "TBD" ]]; then + echo "Error: No default REPO_URL for '$PRODUCT' — set it via environment variable:" + echo "" + echo " REPO_URL=https://github.com/org/repo.git ./run-pipeline.sh $PRODUCT" + exit 1 +fi + +export REPO_URL + +WORKFLOW_PATH="$SCRIPT_DIR/$WORKFLOW" + +if [[ ! -f "$WORKFLOW_PATH" ]]; then + echo "Error: Workflow file not found: $WORKFLOW_PATH" + echo "Create the workflow file first, then retry." 
+ exit 1 +fi + +# --- Stale dist check --- +if [[ "$SKIP_BUILD_CHECK" != "true" ]]; then + DIST_ENTRY="$SCRIPT_DIR/dist/src/cli/main.js" + if [[ ! -f "$DIST_ENTRY" ]]; then + echo "Error: dist/ not found ($DIST_ENTRY)" + echo " This looks like a fresh clone. Run 'npm run build' first." + if [[ "$AUTO_BUILD" == "true" ]]; then + echo " --auto-build: running 'npm run build'..." + (cd "$SCRIPT_DIR" && npm run build) + else + echo " Or re-run with --auto-build to build automatically." + exit 1 + fi + elif [[ -n "$(find "$SCRIPT_DIR/src" -newer "$DIST_ENTRY" -type f 2>/dev/null)" ]]; then + echo "Warning: dist/ is stale — source files are newer than dist/src/cli/main.js" + if [[ "$AUTO_BUILD" == "true" ]]; then + echo " --auto-build: running 'npm run build'..." + (cd "$SCRIPT_DIR" && npm run build) + else + echo " Run 'npm run build' in symphony-ts/, or re-run with --auto-build." + exit 1 + fi + fi +fi + +echo "Launching pipeline for: $PRODUCT" +echo " Workflow: $WORKFLOW" +echo " Repo URL: $REPO_URL" +echo "" + +# Read port from ports.json +PORTS_FILE="$SCRIPT_DIR/pipeline-config/ports.json" +if [[ -f "$PORTS_FILE" ]] && command -v jq &>/dev/null; then + PRODUCT_PORT=$(jq -r --arg p "$PRODUCT" '.[$p] // empty' "$PORTS_FILE") + if [[ -n "$PRODUCT_PORT" ]]; then + # Inject --port before user args (user can still override via --port in passthrough) + set -- --port "$PRODUCT_PORT" "$@" + fi +fi + +LOGS_DIR="/tmp/symphony-logs-${PRODUCT}" +mkdir -p "$LOGS_DIR" +exec node "$SCRIPT_DIR/dist/src/cli/main.js" "$WORKFLOW_PATH" --acknowledge-high-trust-preview --logs-root "$LOGS_DIR" "$@" diff --git a/scripts/test.mjs b/scripts/test.mjs new file mode 100644 index 00000000..fd60d336 --- /dev/null +++ b/scripts/test.mjs @@ -0,0 +1,22 @@ +#!/usr/bin/env node +/** + * Thin vitest wrapper that maps --grep to vitest's -t , + * so that `npm test -- --grep "..."` works as expected (mocha-compatible CLI). 
+ */ +import { spawnSync } from "node:child_process"; + +const args = process.argv.slice(2); +const translated = []; + +for (let i = 0; i < args.length; i++) { + if (args[i] === "--grep" && i + 1 < args.length) { + translated.push("-t", args[++i]); + } else { + translated.push(args[i]); + } +} + +const result = spawnSync("vitest", ["run", ...translated], { + stdio: "inherit", +}); +process.exit(result.status ?? 1); diff --git a/skills/spec-gen/SKILL.md b/skills/spec-gen/SKILL.md new file mode 100644 index 00000000..83a7495a --- /dev/null +++ b/skills/spec-gen/SKILL.md @@ -0,0 +1,412 @@ +--- +name: spec-gen +description: Generate structured specs from brain dumps. Explores target codebase in plan mode, classifies complexity (trivial/standard/complex), generates specs with Gherkin scenarios and executable verify lines, syncs to Linear as parent issue in Draft state for review. +argument-hint: +--- + +# Spec Generator — Brain Dump to Linear Spec + +You transform unstructured brain dumps into structured, verifiable specifications stored in Linear. Specs live in Linear, not the repo (Decision 32). Iteration happens through chat replies with one-way sync to Linear (Decision 33). + +## Skill Contents + +This skill uses progressive disclosure. Read reference files **when indicated**, not upfront. 
+ +| File | Contents | When to Read | +|------|----------|-------------| +| `references/exploration-checklist.md` | Targeted codebase discovery patterns and scoping rules | **Step 0** — before exploring the codebase | +| `references/complexity-router.md` | Decision tree for trivial/standard/complex classification | **Step 1** — before generating anything | +| `references/verify-line-guide.md` | How to write executable `# Verify:` lines with worked examples | **Step 3** — when writing Gherkin scenarios | +| `references/model-tendencies.md` | Known spec generation artifacts and self-correction checklist | **Step 4** — before finalizing | + +All paths are relative to `~/.claude/skills/spec-gen/`. + +--- + +## Inputs + +The skill accepts one of: +1. **A brain dump** — unstructured text describing what to build +2. **A Linear Idea issue** — an existing issue in `Idea` state (provide the issue identifier, e.g., `SYMPH-42`). The skill reads the issue description as the brain dump and upgrades it to `Draft`. + +### Product Context + +The skill reads Linear config from a WORKFLOW file. The user can provide either: +1. **A WORKFLOW file path** (explicit path to a `.md` file) — used directly, no resolution needed. For ad-hoc projects without a named product entry. +2. **A product name** (e.g., `SYMPH`, `JONY`) — resolves to `/pipeline-config/workflows/WORKFLOW-.md` + +```bash +# Named product example: product "symphony" → +# /pipeline-config/workflows/WORKFLOW-symphony.md +# +# WORKFLOW file contains: +# tracker: +# project_slug: fdba14472043 ← Linear project UUID +# +# Auth: linear handles auth via LINEAR_API_KEY env var or `linear auth login`. +``` + +**Resolution order:** +1. If the user provides a WORKFLOW file path (an explicit path ending in `.md`) → use it directly +2. If the user provides a product name → resolve to `/pipeline-config/workflows/WORKFLOW-.md` +3. If neither → ask: "Which product is this for, or provide a path to the WORKFLOW file?" 
+ +### Repo Path + +The skill needs the local filesystem path to the target repository for codebase exploration (file reads, greps, globs) and to locate WORKFLOW files. + +**Resolution order:** +1. **Explicit in brain dump** — if the user includes a path (e.g., "Repo: ~/projects/my-app"), use it +2. **Current working directory** — if cwd contains project markers (`package.json`, `Cargo.toml`, `go.mod`, `pyproject.toml`, `.git`, etc.), use cwd +3. **Ask** — if neither, ask: "What's the local path to the repo?" + +All codebase exploration should be scoped to this path. Named-product WORKFLOW files are at `/pipeline-config/workflows/WORKFLOW-.md`. + +--- + +## Step 0: Explore Target Codebase + +**Read `references/exploration-checklist.md` now.** This step grounds the spec in actual code reality before any classification or generation happens. + +**Skip this step if:** there is no target repo (greenfield project with no existing code), or the user explicitly says "skip exploration." + +### Enter Plan Mode + +Call `EnterPlanMode` to enter read-only exploration mode. In plan mode you can only read, search, and explore — no file writes, no issue creation. + +### Targeted Exploration + +Use the brain dump keywords to guide a focused exploration of the target codebase. Do NOT audit the entire codebase. Focus on what the brain dump touches. + +**Exploration checklist** (see `references/exploration-checklist.md` for full details with examples): + +1. **Project structure**: Package manager, framework, entry points, directory layout +2. **Relevant modules**: Files and directories the brain dump would touch +3. **Existing patterns**: How similar features are currently implemented (find the closest analog) +4. **Test infrastructure**: Test runner, test directory structure, fixture patterns +5. **Schema/data model**: Database schema, API types, or data structures the change would affect +6. **Dependencies**: External libraries or services involved in the affected area +7. 
**Prior art**: Has anything similar been attempted before? (check git log) + +### Produce the Codebase Context Report + +Assemble findings into a structured summary. This report is internal working context — it is NOT included in the Linear issue. It informs all subsequent steps. + +``` +## Codebase Context Report + +### Project Overview +- **Stack**: +- **Package manager**: +- **Test runner**: +- **Entry point**:
+ +### Affected Area +- **Files likely touched**: +- **Modules/directories**: +- **Estimated file count**: across + +### Existing Patterns +- **Closest analog**: +- **Pattern to follow**: +- **Conventions**: + +### Test Landscape +- **Test location**: +- **Test patterns**: +- **Fixture approach**: +- **Verify line hints**: + +### Data Model +- **Relevant schema**: +- **Migrations**: + +### Risks and Constraints +- + +### Classification Signal +- Estimated files touched: +- Estimated capabilities: +- Cross-cutting concerns: +- Infrastructure changes needed: +- Unknowns discovered: +``` + +### Exit Plan Mode + +Call `ExitPlanMode` to present the Codebase Context Report to the user. Wait for approval before proceeding to Step 1. + +If the user requests additional exploration or corrections, re-enter plan mode, update the report, and re-present. + +--- + +## Step 1: Classify Complexity + +**Read `references/complexity-router.md` now.** Classification happens BEFORE any spec content is generated. + +If Step 0 ran, use the **Classification Signal** section from the Codebase Context Report to inform classification. The report provides concrete file counts, capability counts, cross-cutting analysis, and unknown counts — use these instead of estimating from the brain dump alone. + +Analyze the brain dump and classify as one of: + +| Tier | Action | +|------|--------| +| **TRIVIAL** | Skip spec. Create a single Linear issue directly in `Todo` state using `freeze-and-queue.sh --trivial "Title" `. Pipe a description to stdin if needed. No parent issue, no sub-issues, no Gherkin. Done. | +| **STANDARD** | Generate full spec → create parent issue in `Draft` state (Steps 2-5). | +| **COMPLEX** | Generate full spec (Steps 2-5). Flag for ensemble review before freezing. | + +**State your classification and reasoning before proceeding.** Examples: + +> **Classification: TRIVIAL** +> Rationale: Single-file bug fix with known root cause and known fix. No design ambiguity. 
+ +```bash +# Create trivial issue directly in Todo: +bash ~/.claude/skills/spec-gen/scripts/freeze-and-queue.sh \ + --trivial "Fix DELETE /api/tasks 500 on non-numeric ID" + +# With a description piped in: +echo "Return 400 instead of 500 when id param is non-numeric" | \ + bash ~/.claude/skills/spec-gen/scripts/freeze-and-queue.sh \ + --trivial "Fix DELETE /api/tasks 500 on non-numeric ID" +``` + +> **Classification: STANDARD** +> Rationale: Single capability (pagination), clear scope (3 endpoints affected), no architectural decisions needed. Estimated 3 tasks. + +### Idea Issue Upgrade + +If the input is an existing `Idea` issue: +1. Read the issue description from Linear +2. Use it as the brain dump for classification +3. On spec creation, update the existing issue (move to `Draft`) rather than creating a new one + +--- + +## Step 2: Generate Spec Content + +If Step 0 ran, use the Codebase Context Report to write accurate file paths in Task Scope fields, follow the structure described in Existing Patterns, reference the correct framework and runtime in verify lines, and set accurate Out of Scope boundaries based on what the codebase actually contains. + +Generate the spec as a single markdown document. This will become the Linear parent issue description. + +```markdown +# + +## Problem + + +## Solution + + +## Scope +### In Scope +- + +### Out of Scope +- + +## Acceptance Criteria +- AC1: +- AC2: + +## Scenarios + +### Feature: + +\`\`\`gherkin +Scenario: + Given + When I + Then + # Verify: + And + # Verify: +\`\`\` + +## Boundaries +### Always +- + +### Never +- + +## Tasks + +### Task 1: +**Priority**: <1-3, lower = more urgent> +**Scope**: <comma-separated file paths> +**Scenarios**: <which scenarios this task covers> + +### Task 2: ... +``` + +Keep proposals and tasks in a single document — they share context in the Linear issue description. 
+ +--- + +## Step 3: Write Verify Lines + +**Read `references/verify-line-guide.md` now.** + +If Step 0 ran, use the **Test Landscape** section from the Codebase Context Report to use the correct test runner command (e.g., `bun test` vs `npx jest` vs `pytest`), follow the project's test file naming convention for `# Test:` directives, and match fixture patterns observed in existing tests. + +### Verify Line Rules (MANDATORY) + +- Every THEN and AND clause **MUST** have a `# Verify:` line immediately after it. +- Verify lines are shell commands. Exit 0 = pass, non-zero = fail. +- Use `$BASE_URL` for HTTP targets, never hardcoded localhost. +- Each verify line must be self-contained — no dependency on previous verify lines. +- Use `curl -sf` for success cases, `curl -s` for error cases (checking status codes). +- Use `jq -e` (not `jq`) to get non-zero exit on false. + +### Test Directives (OPTIONAL) + +`# Test:` directives tell the implementing agent to generate a persistent test file. Use when: +- Internal logic can't be verified through external behavior alone +- Edge cases need programmatic test coverage beyond verify lines +- You want tests that persist in the repo for CI + +```gherkin +Then the cache is invalidated after update +# Verify: bun test tests/cache.test.ts +# Test: Unit test that cache TTL resets when a task is updated +``` + +--- + +## Step 4: Self-Review + +**Read `references/model-tendencies.md` now.** + +Before presenting the spec to the user, check: + +- [ ] Every THEN/AND has a `# Verify:` line +- [ ] All verify lines use `$BASE_URL` +- [ ] No `$BASE_URL` in assertion values +- [ ] `jq -e` used (not bare `jq`) +- [ ] Error cases use `-s` not `-sf` +- [ ] Acceptance criteria are specific (no "should handle gracefully") +- [ ] Task count matches complexity tier (1-2 for STANDARD, 7+ for COMPLEX) +- [ ] No scope creep beyond the brain dump +- [ ] File paths in Task Scope match actual files discovered in Step 0 (no invented paths) +- [ ] Verify line 
commands use the project's actual test runner and patterns +- [ ] Spec structure follows existing patterns identified in Step 0 (not a novel architecture) + +If any check fails, fix the spec before presenting it. + +--- + +## Step 5: Sync to Linear (Parent Only) + +After presenting the spec to the user and getting approval, use `freeze-and-queue.sh` for ALL Linear issue operations. **Do NOT create issues via inline `linear` commands or raw GraphQL — always use the script.** + +### Create Parent Issue (new spec) + +Write the spec content to a temp file and run the script with `--parent-only` to create ONLY the parent issue in Draft state (no sub-issues). + +```bash +# Create parent issue only (no sub-issues): +cat /tmp/spec-content.md | bash ~/.claude/skills/spec-gen/scripts/freeze-and-queue.sh --parent-only <workflow-path> + +# Or with a spec file: +bash ~/.claude/skills/spec-gen/scripts/freeze-and-queue.sh --parent-only <workflow-path> /tmp/spec-content.md + +# Dry run first to verify parsing: +cat /tmp/spec-content.md | bash ~/.claude/skills/spec-gen/scripts/freeze-and-queue.sh --parent-only --dry-run <workflow-path> +``` + +The script automatically: +- Resolves the team ID and project ID from the WORKFLOW file's `project_slug` +- Looks up state UUIDs (Draft for parent) +- Creates the parent issue with `[Spec]` title prefix +- Prints the parent issue identifier and URL + +Return the Linear deep link from the script output to the user for review. + +### Update Parent Issue (iteration) + +On subsequent invocations where the user requests changes: +1. Accept the change request in chat +2. Regenerate the spec with the requested changes +3. Update the existing parent issue using `--update` with `--parent-only` (no sub-issues during iteration): + +```bash +cat /tmp/spec-content.md | bash ~/.claude/skills/spec-gen/scripts/freeze-and-queue.sh \ + --parent-only --update <PARENT_ISSUE_ID> <workflow-path> +``` + +4. 
Return the updated deep link from the script output + +**Sync is always one-way.** Out-of-band edits in the Linear UI get overwritten on next sync. + +### Upgrade Idea Issue + +If the input was an existing `Idea` issue: +1. Use `--parent-only --update` with the existing issue ID to update its description and move it to Draft: + +```bash +cat /tmp/spec-content.md | bash ~/.claude/skills/spec-gen/scripts/freeze-and-queue.sh \ + --parent-only --update <IDEA_ISSUE_ID> <workflow-path> +``` + +2. Return the deep link + +### Debugging Reference + +<details> +<summary>State UUID lookup (for debugging only — do NOT use for issue creation)</summary> + +If you need to inspect team states for debugging purposes: + +```bash +# List all statuses for a team (via GraphQL — no built-in statuses command) +linear api '{ workflowStates(filter: { team: { key: { eq: "SYMPH" } } }) { nodes { id name type } } }' + +# List projects (optionally filter by team) +linear project list --team SYMPH + +# Raw GraphQL via linear (uses configured auth automatically) +linear api '{ viewer { id name } }' +``` + +These queries are handled automatically by `freeze-and-queue.sh` during normal operation. + +</details> + +### Done — Next: Freeze + +The parent issue is now in `Draft` state in Linear. Share the link with the user for review. + +**When the user is ready to freeze** (create sub-issues for autonomous pipeline execution), they should invoke: + +`/spec-freeze <PARENT_ISSUE_ID> <workflow-path>` + +This is a separate skill invocation — the freeze operation is structurally separated to enforce the review gate. + +--- + +## Parent Issue Lifecycle + +``` +Idea → Draft + ↑ + (iterate via chat, one-way sync to Linear) +``` + +- **Idea**: Raw concept, no spec. Optional starting point. +- **Draft**: `/spec-gen` has run. Full spec in description. Actively iterating via chat. + +--- + +## Gotchas + +- **Don't invent requirements.** The spec should capture what was asked, not what you think should be asked. 
Scope creep is the most common spec generation artifact. +- **Verify lines are NOT tests.** They are behavioral checks run by the implementing agent. They should be fast, self-contained, and deterministic. +- **One-way sync only.** Never parse spec content back from Linear. The skill is the source of truth during iteration; Linear is the store. +- **Don't generate design.md for STANDARD features.** Only COMPLEX features need architectural documentation. + +## Related Skills + +- `/spec-freeze` — freeze a drafted spec into Linear sub-issues for autonomous pipeline execution +- `/pipeline-review` — headless adversarial review for the review stage (runs AFTER implementation) +- `/council-review` — multi-model cross-examination review (for highest-assurance review) +- `/adversarial-review` — interactive multi-model development + review cycle diff --git a/skills/spec-gen/gates/spec-gate.yaml b/skills/spec-gen/gates/spec-gate.yaml new file mode 100644 index 00000000..3d4db7e5 --- /dev/null +++ b/skills/spec-gen/gates/spec-gate.yaml @@ -0,0 +1,33 @@ +name: spec-validation +description: Ensemble gate for spec validation before freeze +max_rounds: 3 + +reviewers: + - role: PM + model: claude + system_prompt: | + You are a Product Manager reviewing this spec for completeness and testability. + Check: Are all acceptance criteria covered by scenarios? Are verify lines executable? + Can an agent implement this without ambiguity? + verdict_required: true + + - role: Architect + model: claude + system_prompt: | + You are a Software Architect reviewing this spec for technical feasibility. + Check: Is the proposed approach sound? Are there missing edge cases? + Are the verify lines testing the right things? + verdict_required: true + + - role: VoC + model: gemini + system_prompt: | + You are a Voice of the Customer evaluating this spec for user impact. + Check: Does this deliver value? Are error cases handled gracefully? + Would a user find this API intuitive? 
+ verdict_required: false + +aggregate: + pass: all_required_pass + concerns: any_reviewer_concerns + fail: any_required_fail diff --git a/skills/spec-gen/references/complexity-router.md b/skills/spec-gen/references/complexity-router.md new file mode 100644 index 00000000..df391acf --- /dev/null +++ b/skills/spec-gen/references/complexity-router.md @@ -0,0 +1,176 @@ +# Complexity Router — Decision Tree + +This is the first decision you make. Classify the brain dump BEFORE generating any artifacts. + +## Classification Decision Tree + +``` +Is this a one-liner, bug fix, config change, or file operation? +├── YES → TRIVIAL +└── NO + ├── How many capabilities does it touch? + │ ├── 1 capability, ≤2 tasks, clear scope → STANDARD + │ └── 2+ capabilities, OR architectural change, OR 7+ tasks → COMPLEX + └── Ambiguous? → Default to STANDARD (see Signal Detection below) +``` + +--- + +## Tier Definitions + +### TRIVIAL — Skip spec, create single Linear issue in Todo + +**Definition**: A change with no design ambiguity. The description IS the implementation plan. + +**Signals** (any ONE is sufficient): +- Single file changed +- Fix is mechanical (typo, version bump, env var, config toggle) +- No behavioral change to end users +- Copy/move/rename operation +- Dependency update with no API change +- Bug fix where the root cause and fix are already known + +**Action**: Do NOT generate a spec. 
Create a single Linear issue directly in `Todo` state with: +- Title from the brain dump +- Description with enough detail for an agent to implement +- Priority based on urgency +- No parent issue, no sub-issues — symphony picks it up directly + +**Examples**: +| Brain Dump | Why Trivial | +|------------|-------------| +| "Fix typo in README — 'recieve' should be 'receive'" | Single character fix, no design | +| "Update BASE_URL env var from port 3000 to 8080" | Config change, one file | +| "Copy the run-pipeline.sh script to the new repo" | File operation | +| "Bump Hono from 4.5 to 4.6" | Dependency update, no API change | +| "Add .wrangler/ to .gitignore" | Single-line config append | +| "Fix the 500 on DELETE /api/tasks when id is non-numeric — return 400 instead" | Bug fix with known root cause and known fix | + +**Counter-examples (NOT trivial despite sounding simple)**: +| Brain Dump | Why NOT Trivial | +|------------|-----------------| +| "Add pagination" | Touches query logic, response shape, and possibly frontend — STANDARD | +| "Fix the slow API" | Root cause unknown, may require investigation — at least STANDARD | +| "Add dark mode" | Touches many files, needs design decisions — COMPLEX | + +--- + +### STANDARD — Generate spec → parent issue in Draft → freeze to sub-issues + +**Definition**: A single capability with clear scope that decomposes into 1-2 tasks. + +**Signals** (most must be present): +- One new feature or one behavior change +- Touches 2-6 files +- Clear acceptance criteria can be written +- No architectural decisions needed (uses existing patterns) +- Can be described in 1-2 sentences +- Does not introduce new infrastructure (databases, queues, external services) + +**Action**: Generate full spec as a single markdown document containing: +1. Problem/Solution/Scope — WHY this change matters +2. Gherkin scenarios with `# Verify:` lines (MANDATORY) and `# Test:` directives (optional) +3. 
Task list with Priority/Scope/Scenarios + +Create a parent Linear issue in `Draft` state with the spec as the issue description. Iterate via chat. On freeze, create sub-issues in `Todo` and move parent to `Backlog`. + +**Examples**: +| Brain Dump | Capabilities | Estimated Tasks | +|------------|-------------|-----------------| +| "Add pagination to GET /api/tasks — page, limit params, total count header" | 1 (pagination) | 3 (query logic, response format, edge cases) | +| "Add user authentication with email/password" | 1 (auth) | 4 (model, signup, login, middleware) | +| "Add a /health endpoint that returns service status and uptime" | 1 (health check) | 2 (endpoint, response format) | +| "Add rate limiting — 100 req/min per IP with 429 response" | 1 (rate limiting) | 3 (middleware, config, response) | +| "Add soft delete to tasks — deletedAt timestamp, exclude from listings" | 1 (soft delete) | 4 (schema migration, delete endpoint, list filter, restore endpoint) | +| "Add input validation with Zod schemas for all endpoints" | 1 (validation) | 3 (schemas, middleware, error formatting) | + +--- + +### COMPLEX — Generate spec + ensemble gate → parent issue in Draft → freeze to sub-issues + +**Definition**: A change that spans multiple capabilities, requires architectural decisions, or has cross-cutting concerns. + +**Signals** (any ONE is sufficient): +- Introduces a new data model or significantly changes an existing one +- Requires a new external service integration (database, queue, third-party API) +- Touches 7+ files or 3+ distinct subsystems +- Has cross-cutting concerns (auth, logging, error handling that affects everything) +- Requires design tradeoffs with no obvious right answer +- Changes the system's deployment model or infrastructure +- Multiple stakeholders would have opinions + +**Action**: Same as STANDARD, plus: +1. Include a `## Design` section in the spec — HOW (architecture decisions, tradeoffs, alternatives considered) +2. 
Run ensemble gate with PM/Architect/VoC reviewers before freeze +3. If ensemble returns CONCERNS, iterate on the spec before freezing +4. On freeze, add ensemble gate flag to sub-issues + +**Examples**: +| Brain Dump | Why Complex | +|------------|-------------| +| "Redesign the data model to support multi-tenant" | New data model, cross-cutting (every query needs tenant scope) | +| "Add real-time sync with WebSocket support" | New infrastructure (WebSocket server), new data flow pattern | +| "Add a recommendation engine based on user behavior" | New subsystem (ML/analytics), new data pipeline | +| "Migrate from SQLite to PostgreSQL with connection pooling" | Infrastructure change, affects all queries | +| "Add an admin dashboard with role-based access control" | Multiple capabilities (dashboard, RBAC, UI), 10+ tasks | +| "Add offline support with conflict resolution" | Cross-cutting (sync, storage, conflict resolution, UI states) | + +--- + +## Signal Detection for Ambiguous Cases + +When a brain dump doesn't clearly fit one tier, use these disambiguation rules: + +### Rule 1: When in doubt, choose STANDARD over TRIVIAL +A TRIVIAL classification means no spec is generated. If there's any chance the agent would benefit from Gherkin scenarios and verify lines, classify as STANDARD. The cost of an unnecessary spec is low; the cost of a missing spec is high (wasted implementation cycles, no verification). + +### Rule 2: When in doubt between STANDARD and COMPLEX, check for cross-cutting +Ask: "Does this change require me to modify code I wasn't planning to modify?" If yes → COMPLEX. If the change is additive (new files, new endpoints) with no modification to existing code → STANDARD. + +### Rule 3: "Add X" with a known pattern is STANDARD +If the brain dump says "add X" and you can point to an existing example of X in the codebase (or a well-known pattern), it's STANDARD. The pattern removes ambiguity. 
+ +### Rule 4: "Change X" or "redesign X" is usually COMPLEX +Modifications to existing behavior have higher blast radius than additions. If existing tests, contracts, or consumers are affected, lean COMPLEX. + +### Rule 5: Count the unknowns +- 0 unknowns → TRIVIAL or STANDARD +- 1-2 unknowns → STANDARD (unknowns get resolved during spec generation) +- 3+ unknowns → COMPLEX (unknowns need architectural investigation) + +### Rule 6: Estimate, then check +If you estimate 1-2 tasks → STANDARD. If you estimate 7+ tasks → COMPLEX. If you estimate 1 task → TRIVIAL (unless it's a behavioral change with verification needs). +<!-- TODO(SYMPH-57): This leaves a 3-6 task gap between STANDARD (≤2) and COMPLEX (7+). A follow-up issue should decide whether 3-6 tasks maps to COMPLEX or whether the COMPLEX threshold should be lowered to 3+. --> + +--- + +## Existing Spec Detection + +Before classifying, check for existing parent issues in the target Linear project: + +- **No existing parent issue**: New spec — create a parent issue in `Draft` state. +- **Existing `Idea` issue**: Upgrade path — update the issue with generated spec and move to `Draft`. +- **Existing `Draft` issue for same capability**: Iteration — update the existing parent issue description (one-way sync). +- **Existing `Backlog` issue with sub-issues**: Already frozen — this is a new spec for a different capability, or requires unfreezing (out of scope for this skill). 
+ +### Signals that affect classification +- Existing specs in Linear cover the same capability → this is iteration on an existing `Draft`, not a new spec +- Existing specs cover adjacent capabilities → check for cross-cutting impact (may push STANDARD → COMPLEX) +- Existing sub-issues in `Todo` → spec is already frozen, this may be a new feature or a continuation requiring a separate parent + +--- + +## Quick Reference Table + +| Signal | Trivial | Standard | Complex | +|--------|---------|----------|---------| +| Files changed | 1 | 2-6 | 7+ | +| Tasks | 0-1 | 1-2 | 7+ | +| Capabilities | 0 | 1 | 2+ | +| Design decisions | None | Minimal | Multiple | +| Infrastructure changes | None | None | Yes | +| Cross-cutting concerns | No | No | Yes | +| Parent issue? | No (single Todo issue) | Yes (Draft → Backlog) | Yes (Draft → Backlog) | +| Spec in Linear? | No | Yes | Yes + Design section | +| Ensemble gate? | No | No | Yes | +| Unknowns | 0 | 0-2 | 3+ | diff --git a/skills/spec-gen/references/exploration-checklist.md b/skills/spec-gen/references/exploration-checklist.md new file mode 100644 index 00000000..00adf05e --- /dev/null +++ b/skills/spec-gen/references/exploration-checklist.md @@ -0,0 +1,132 @@ +# Exploration Checklist — Targeted Codebase Discovery + +This checklist guides Step 0 exploration. Use the brain dump keywords to scope your search. +Do NOT explore the entire codebase. Focus on what the brain dump touches. + +--- + +## 1. Project Structure (always — takes 10 seconds) + +Identify the basics that inform every subsequent decision. + +**What you need:** +- Language and runtime (TypeScript/Bun, Python/Flask, Go, etc.) +- Package manager (bun, npm, pnpm, pip, cargo) +- Framework (Hono, Express, FastAPI, Gin, etc.) +- Build tool if applicable (tsc, vite, webpack) +- Entry point (main application file) + +**How:** Read `package.json` (or equivalent), check for config files, glob for entry points. + +## 2. 
Relevant Modules (use brain dump keywords) + +Find the files and directories the brain dump would touch. + +**How:** +- Grep for keywords from the brain dump (feature names, domain terms) +- Glob for related file names +- Read the entry point and trace the import chain to the affected area + +**What you need:** +- Specific file paths that would change (not directory guesses) +- The module boundary — where does one concern end and another begin? +- Import/dependency chains — what else gets pulled in? + +## 3. Existing Patterns (find the closest analog) + +The most valuable discovery. Finding how a similar feature is already implemented saves the most spec-writing time. + +**How:** +- Grep for patterns similar to what the brain dump describes +- Read a representative handler/controller/route end-to-end +- Note the layering: route definition → handler → service → data access + +**What you need:** +- The closest existing analog to the requested feature +- How it is structured (what layers, what patterns) +- What conventions it follows (naming, error format, response shape) +- If no analog exists, note that explicitly — it changes classification + +## 4. Test Infrastructure + +Determines how verify lines and test directives should be written. + +**How:** +- Glob for test files (`**/*.test.*`, `**/*.spec.*`, `**/test_*`) +- Read a representative test close to the affected area +- Check for test config files (`jest.config*`, `vitest.config*`, etc.) +- Check test scripts in package.json + +**What you need:** +- Test runner and command to invoke it +- Test file location pattern (colocated vs. separate test directory) +- How fixtures/setup work (factories, seed data, beforeEach patterns) +- E2E test framework if present (Playwright, Cypress, etc.) + +## 5. Schema and Data Model + +If the brain dump touches data, understand the current model. 
+ +**How:** +- Glob for schema definitions (`**/schema*`, `**/migration*`, `**/models/*`) +- Grep for type/interface definitions related to the brain dump keywords +- Check for ORM patterns (prisma, drizzle, typeorm, sqlalchemy) + +**What you need:** +- Tables/types/interfaces the change would affect +- Whether migrations exist and how they work +- The data access pattern (direct SQL, ORM, repository pattern) + +## 6. Dependencies and External Services + +Check what external dependencies the affected area uses. + +**How:** +- Grep for service names, SDK names, API URL patterns in the affected area +- Check for environment variables used in the affected area + +**What you need:** +- External services the affected area talks to +- Environment variables it needs +- SDKs or client libraries in use + +## 7. Prior Art (git history) + +Check if this has been attempted or related work exists. + +**How:** +- `git log --oneline --all --grep="<keyword>"` for related commits +- `git branch -a | grep -i "<keyword>"` for related branches +- `git log --oneline -10 -- <affected-file-paths>` for recent changes + +**What you need:** +- Whether this was tried before (and reverted?) +- Recent changes that might conflict +- Related features that were added recently + +--- + +## Scoping the Exploration + +The depth self-adjusts based on what you find: + +**For a trivial-sounding brain dump** (typo fix, config change): Sections 1 and 2 only. +Confirm the file exists, confirm the fix is mechanical, done. + +**For a standard-sounding brain dump** (add a feature): Sections 1-5. +Full exploration of the affected area but no cross-cutting investigation. + +**For a complex-sounding brain dump** (redesign, new subsystem): Sections 1-7. +Full exploration including dependency chains, prior art, and cross-cutting concerns. + +If Section 2 reveals the change touches 8 files across 3 modules, go deeper. If it shows a single-file change, wrap up quickly. 
+ +--- + +## Common Exploration Mistakes + +- **Exploring everything**: Don't read files unrelated to the brain dump. Use the keywords to scope. +- **Stopping too early**: Finding the first relevant file is not enough. Trace the full call chain. +- **Ignoring tests**: The test landscape directly determines verify line quality. Always check. +- **Missing the analog**: If an existing feature does something similar, finding it saves the most spec-writing time. Invest here. +- **Not recording specifics**: "Uses TypeScript" is too vague. "TypeScript with Hono framework, Bun runtime, Drizzle ORM, tests in tests/ using bun test" is what the spec needs. diff --git a/skills/spec-gen/references/model-tendencies.md b/skills/spec-gen/references/model-tendencies.md new file mode 100644 index 00000000..7206417c --- /dev/null +++ b/skills/spec-gen/references/model-tendencies.md @@ -0,0 +1,79 @@ +# Model Tendencies — Spec Generation + +Known patterns to watch for when Claude generates specs. Use these to self-correct during spec generation and to anticipate issues the ensemble gate will flag. + +--- + +## Claude (Spec Author) + +### Strengths +- Excellent at structuring brain dumps into coherent capabilities +- Good at generating realistic Gherkin scenarios +- Naturally produces acceptance criteria that map to testable outcomes +- Strong at identifying edge cases and error scenarios + +### Known Spec Generation Artifacts + +- **Over-specification**: Generates 15 scenarios when 6 would cover the behavior. Trim to what matters. Each scenario should test a distinct behavioral path, not a minor variation. + +- **Verify line verbosity**: Writes multi-line verify commands when a single `curl | jq` pipeline would suffice. Keep verify lines to one line where possible. + +- **Missing error scenarios**: Strong on happy paths, weaker on error cases. After generating scenarios, ask: "What happens when input is missing? Invalid? Too large? Unauthorized?" Add scenarios for each. 
+ +- **Vague acceptance criteria**: Writes AC like "the system should handle errors gracefully." Replace with specific, testable criteria: "POST /api/tasks with missing title returns 400 with `{error: 'title is required'}`." + +- **Task granularity mismatch**: Either decomposes into too many tiny tasks (1 task per endpoint) or too few large tasks (1 task for entire feature). Target 1-2 tasks for STANDARD features. + +- **Scope creep in specs**: Brain dump says "add pagination" but the spec includes sorting, filtering, search, and caching. Stick to what was asked. Extra capabilities should be separate brain dumps. + +- **$BASE_URL in assertion values**: Puts `$BASE_URL` inside jq assertions instead of only in curl URLs. Linter catches this, but avoid it in the first place. + +- **Verify lines that depend on ordering**: Assumes tasks will have sequential IDs or specific creation order. Use creation-then-assertion patterns (create the data, then check it) instead of assuming pre-existing state. + +- **Forgetting the `# Verify:` line entirely**: When writing complex scenarios with multiple AND clauses, sometimes generates the Gherkin without any verify lines. The linter will catch this, but aim to write them inline with the scenario. + +### Blind Spots + +- **Infrastructure assumptions**: Generates verify lines that assume a specific runtime (e.g., Bun vs Node) without checking. Use portable commands. +- **Concurrent access scenarios**: Rarely generates scenarios for concurrent requests or race conditions unless explicitly prompted. +- **Data cleanup**: Verify lines that create test data don't clean it up. For stateful systems, this means verify lines may interact with each other. 
+ +--- + +## Ensemble Gate Reviewers + +When the ensemble gate runs on COMPLEX specs, anticipate these patterns: + +### PM Reviewer (Claude) +- Focuses on completeness and user value +- Will flag missing user stories or acceptance criteria +- May push for additional features beyond scope — resist scope creep +- Good at catching when a spec describes HOW instead of WHAT + +### Architect Reviewer (Claude) +- Focuses on feasibility, tech risk, and integration points +- Will flag missing error handling, security considerations +- May over-engineer — suggests abstractions and patterns prematurely +- Good at catching when a spec creates coupling or breaks existing contracts + +### VoC Reviewer (Gemini) +- Focuses on user experience and value proposition +- May flag UX concerns that are valid but out of scope +- Sometimes confuses backend API specs with user-facing features +- Good at catching when acceptance criteria don't map to user outcomes + +--- + +## Spec Quality Checklist + +Before finalizing any spec, check against these known issues: + +- [ ] Every THEN/AND has a `# Verify:` line +- [ ] All verify lines use `$BASE_URL`, not hardcoded URLs +- [ ] Verify lines use `-e` flag with `jq` +- [ ] Error cases use `-s` (not `-sf`) with curl for status code checks +- [ ] Acceptance criteria are specific and testable (no "should handle gracefully") +- [ ] Task count is appropriate for complexity tier (1-2 for STANDARD) +- [ ] No scope creep beyond the original brain dump +- [ ] Scenarios cover error paths, not just happy paths +- [ ] Each verify line is self-contained (no cross-dependency) diff --git a/skills/spec-gen/references/verify-line-guide.md b/skills/spec-gen/references/verify-line-guide.md new file mode 100644 index 00000000..9cc62fd0 --- /dev/null +++ b/skills/spec-gen/references/verify-line-guide.md @@ -0,0 +1,254 @@ +# Verify Line Guide — How to Write Executable Verification + +Every THEN and AND clause in a Gherkin scenario MUST have a `# Verify:` line. 
This is enforced by the linter and is the foundation of our pipeline's reliability.
+
+## Rules
+
+1. **`# Verify:` lines are shell commands.** Exit code 0 = pass, non-zero = fail.
+2. **Use `$BASE_URL`** for all HTTP targets. Never hardcode `localhost:3000`.
+3. **`$BASE_URL` belongs in request URLs only**, not in assertion values.
+4. **Each verify line is self-contained.** It must not depend on output from previous verify lines.
+5. **Verify lines test behavior, not implementation.** Test what the system does, not how it does it.
+
+## Directives Reference
+
+| Directive | Placement | Required? | Consumed By | Purpose |
+|-----------|-----------|-----------|-------------|---------|
+| `# Verify:` | After THEN/AND | **Yes** (linter-enforced) | Implement stage | Deterministic behavioral check. Exit 0 = pass. |
+| `# Test:` | After any scenario line | No | Implement stage | Agent generates a persistent test file. Descriptive, not executable. |
+
+---
+
+## API Verification Patterns (curl + jq)
+
+These are proven patterns from our pipeline. Use them as templates.
+
+### Pattern 1: Assert response shape
+
+Check that a response has required fields:
+
+```gherkin
+Then I receive a task with all required fields
+# Verify: curl -sf $BASE_URL/api/tasks/1 | jq -e 'has("id") and has("title") and has("status") and has("createdAt") and has("updatedAt")'
+```
+
+**How it works**: `jq -e` exits non-zero if the expression evaluates to `false` or `null`. Chain `has()` checks with `and` — a comma-separated stream like `has("id","title")` emits one boolean per key, and `-e` only inspects the last output, so a missing earlier key would wrongly pass.
+
+### Pattern 2: Assert specific field values
+
+Check that a field has an expected value:
+
+```gherkin
+Then the task status defaults to "todo"
+# Verify: curl -sf -X POST $BASE_URL/api/tasks -H 'Content-Type: application/json' -d '{"title":"Status check"}' | jq -e '.status == "todo"'
+```
+
+**How it works**: `jq -e '.field == "value"'` returns true/false. `-e` makes jq exit non-zero on false. 
+ +### Pattern 3: Assert HTTP status codes + +Check error responses by status code: + +```gherkin +Then I receive a 404 response +# Verify: curl -s -o /dev/null -w '%{http_code}' $BASE_URL/api/tasks/99999 | grep -q '404' +``` + +**How it works**: `-o /dev/null` discards the body. `-w '%{http_code}'` prints only the status code. `grep -q` exits 0 on match. + +**Note**: Use `-sf` (silent + fail) for success cases, `-s` (silent only) for error cases. The `-f` flag makes curl exit non-zero on HTTP errors, which you want for success assertions but NOT for error assertions where you're checking the error code itself. + +### Pattern 4: Assert array properties + +Check collection responses: + +```gherkin +Then I receive a JSON array of tasks +# Verify: curl -sf $BASE_URL/api/tasks | jq -e 'type == "array"' + +And each task has the required fields +# Verify: curl -sf $BASE_URL/api/tasks | jq -e 'all(has("id","title","status"))' + +And the list contains at least 3 items +# Verify: curl -sf $BASE_URL/api/tasks | jq -e 'length >= 3' +``` + +### Pattern 5: Create-then-verify (stateful sequences) + +When a verify line needs setup (create before checking), do it all in one command: + +```gherkin +Then the task no longer appears in the task list +# Verify: ID=$(curl -sf -X POST $BASE_URL/api/tasks -H 'Content-Type: application/json' -d '{"title":"Delete me"}' | jq -r '.id') && curl -sf -X DELETE $BASE_URL/api/tasks/$ID && curl -sf $BASE_URL/api/tasks | jq -e "map(select(.id == $ID)) | length == 0" +``` + +**How it works**: Chain setup → action → assertion with `&&`. If any step fails, the whole line fails. 
+
+### Pattern 6: Assert response headers
+
+Check headers like pagination metadata (quote the URL — an unquoted `&` is a shell control operator):
+
+```gherkin
+Then the response includes a total count header
+# Verify: curl -sf -D - "$BASE_URL/api/tasks?page=1&limit=10" -o /dev/null | grep -qi 'x-total-count'
+```
+
+---
+
+## UI Verification Patterns (Playwright)
+
+For features with a user interface, verify lines reference Playwright test files. The implementing agent writes the Playwright test; the verify line runs it.
+
+### Pattern 1: Run a specific test file
+
+```gherkin
+Then the login form renders with email and password fields
+# Verify: npx playwright test tests/e2e/login.spec.ts --reporter=json 2>/dev/null | jq -e '.suites[0].specs | all(.ok)'
+# Test: Write a Playwright test that navigates to /login and asserts email input, password input, and submit button are visible
+```
+
+### Pattern 2: Run a specific test by name
+
+```gherkin
+Then clicking submit with invalid credentials shows an error message
+# Verify: npx playwright test tests/e2e/login.spec.ts -g "invalid credentials" --reporter=json 2>/dev/null | jq -e '.suites[0].specs | all(.ok)'
+# Test: Write a Playwright test that submits invalid credentials and asserts the error message contains "Invalid email or password"
+```
+
+### Pattern 3: Check visual state with screenshot comparison
+
+```gherkin
+Then the dashboard matches the approved design
+# Verify: npx playwright test tests/e2e/dashboard-visual.spec.ts --reporter=json 2>/dev/null | jq -e '.suites[0].specs | all(.ok)'
+# Test: Write a Playwright visual regression test for the dashboard page at 1440x900 viewport
+```
+
+**Note on UI verify lines**: The `# Test:` directive tells the implementing agent WHAT to test. The `# Verify:` line tells the pipeline HOW to run it. The agent must create the test file first, then the verify line will pass.
+
+---
+
+## Infrastructure Verification Patterns
+
+For changes to configuration, deployment, or non-HTTP infrastructure. 
+ +### Pattern 1: File existence and content + +```gherkin +Then the config file contains the database URL +# Verify: grep -q 'DATABASE_URL' .env.example +``` + +### Pattern 2: Script executability + +```gherkin +Then the migration script is executable and runs without error +# Verify: test -x scripts/migrate.sh && bash scripts/migrate.sh --dry-run +``` + +### Pattern 3: Docker/container health + +```gherkin +Then the service starts successfully in Docker +# Verify: docker compose up -d --wait && curl -sf http://localhost:3000/health | jq -e '.status == "ok"' && docker compose down +``` + +### Pattern 4: TypeScript compilation + +```gherkin +Then the project compiles without errors +# Verify: npx tsc --noEmit +``` + +### Pattern 5: Dependency validation + +```gherkin +Then all dependencies resolve correctly +# Verify: bun install --frozen-lockfile 2>&1 | tail -1 | grep -qv 'error' +``` + +--- + +## Common Mistakes + +### Mistake 1: Hardcoded localhost + +```gherkin +# WRONG: +# Verify: curl -sf http://localhost:3000/api/tasks | jq -e 'length > 0' + +# RIGHT: +# Verify: curl -sf $BASE_URL/api/tasks | jq -e 'length > 0' +``` + +### Mistake 2: $BASE_URL in assertion values + +```gherkin +# WRONG (BASE_URL in the expected value): +# Verify: curl -sf $BASE_URL/api/tasks/1 | jq -e '.url == "$BASE_URL/api/tasks/1"' + +# RIGHT (only in request URL): +# Verify: curl -sf $BASE_URL/api/tasks/1 | jq -e '.url | endswith("/api/tasks/1")' +``` + +### Mistake 3: Missing `-e` flag on jq + +```gherkin +# WRONG (jq exits 0 even when expression is false): +# Verify: curl -sf $BASE_URL/api/tasks | jq 'length > 0' + +# RIGHT (-e makes jq exit non-zero on false/null): +# Verify: curl -sf $BASE_URL/api/tasks | jq -e 'length > 0' +``` + +### Mistake 4: Using `-sf` for error status checks + +```gherkin +# WRONG (-f makes curl exit non-zero on 4xx/5xx, so grep never runs): +# Verify: curl -sf -o /dev/null -w '%{http_code}' $BASE_URL/api/tasks/bad | grep -q '400' + +# RIGHT (use -s only, let 
the status code through): +# Verify: curl -s -o /dev/null -w '%{http_code}' $BASE_URL/api/tasks/bad | grep -q '400' +``` + +### Mistake 5: Verify lines that depend on each other + +```gherkin +# WRONG (second verify depends on first creating the task): +Then a task is created +# Verify: curl -sf -X POST $BASE_URL/api/tasks -H 'Content-Type: application/json' -d '{"title":"Dep test"}' +And the task appears in the list +# Verify: curl -sf $BASE_URL/api/tasks | jq -e 'map(select(.title == "Dep test")) | length > 0' + +# RIGHT (self-contained — creates and checks in one line): +And the task appears in the list after creation +# Verify: curl -sf -X POST $BASE_URL/api/tasks -H 'Content-Type: application/json' -d '{"title":"Dep test"}' && curl -sf $BASE_URL/api/tasks | jq -e 'map(select(.title == "Dep test")) | length > 0' +``` + +### Mistake 6: Overly complex single verify lines + +If a verify line exceeds ~200 characters or has 3+ chained operations, consider whether the scenario should be split into separate scenarios. Each scenario should test one thing. + +--- + +## When You Can't Write a Deterministic Verify Line + +Some requirements don't have deterministic, observable outputs. In these cases: + +1. **Use `# Test:` instead.** Write a descriptive test directive that the implementing agent will use to generate a test file. + +```gherkin +Then the cache is invalidated after update +# Test: Unit test that the cache TTL resets when a task is updated via PATCH +``` + +2. **Pair `# Test:` with a `# Verify:` that runs the test.** + +```gherkin +Then the cache is invalidated after update +# Verify: bun test tests/cache-invalidation.test.ts +# Test: Unit test that the cache TTL resets when a task is updated via PATCH +``` + +3. **For purely subjective criteria**, omit both directives. The review stage (LLM-as-judge) evaluates these during adversarial review. + +**Hierarchy**: Prefer `# Verify:` (deterministic) over `# Test:` (agent-generated) over nothing (review-judged). 
Use the most deterministic option available. diff --git a/skills/spec-gen/scripts/ensemble-gate.sh b/skills/spec-gen/scripts/ensemble-gate.sh new file mode 100755 index 00000000..ed52819a --- /dev/null +++ b/skills/spec-gen/scripts/ensemble-gate.sh @@ -0,0 +1,153 @@ +#!/usr/bin/env bash +# ensemble-gate.sh — Spec-stage validation: model×role parallel CLI invocations +# Reads gate config from .ensemble/gates/, runs reviewers, aggregates verdicts +# ~100 lines as specced + +set -euo pipefail + +GATE_CONFIG="${1:-.ensemble/gates/spec-gate.yaml}" +SPEC_PATH="${2:-openspec/specs}" +MAX_ROUNDS="${MAX_ROUNDS:-3}" + +echo "=== ensemble-gate.sh ===" +echo "Gate config: $GATE_CONFIG" +echo "Spec path: $SPEC_PATH" +echo "" + +# Collect all spec content for review context +SPEC_CONTENT="" +for spec_file in $(find "$SPEC_PATH" -name "*.md" -type f); do + SPEC_CONTENT+="--- $(basename "$spec_file") ---"$'\n' + SPEC_CONTENT+="$(cat "$spec_file")"$'\n\n' +done + +if [[ -z "$SPEC_CONTENT" ]]; then + echo "ERROR: No spec files found in $SPEC_PATH" + exit 1 +fi + +# Parse reviewers from YAML config (lightweight — no yq dependency) +# Expected format: role, model, system_prompt, verdict_required +declare -a ROLES MODELS PROMPTS REQUIRED +reviewer_idx=-1 +in_prompt=false +current_prompt="" + +while IFS= read -r line; do + if [[ "$line" =~ ^[[:space:]]*-\ role:\ (.+)$ ]]; then + if [[ $reviewer_idx -ge 0 ]] && $in_prompt; then + PROMPTS[$reviewer_idx]="$current_prompt" + in_prompt=false + current_prompt="" + fi + ((reviewer_idx++)) + ROLES[$reviewer_idx]="${BASH_REMATCH[1]}" + REQUIRED[$reviewer_idx]="true" + elif [[ "$line" =~ ^[[:space:]]*model:\ (.+)$ ]] && [[ $reviewer_idx -ge 0 ]]; then + MODELS[$reviewer_idx]="${BASH_REMATCH[1]}" + elif [[ "$line" =~ ^[[:space:]]*verdict_required:\ (.+)$ ]] && [[ $reviewer_idx -ge 0 ]]; then + REQUIRED[$reviewer_idx]="${BASH_REMATCH[1]}" + elif [[ "$line" =~ ^[[:space:]]*system_prompt:\ \|$ ]] && [[ $reviewer_idx -ge 0 ]]; then + in_prompt=true + 
current_prompt=""
+  elif $in_prompt; then
+    if [[ "$line" =~ ^[[:space:]]{4,} ]] || [[ -z "$line" ]]; then
+      current_prompt+="${line}"$'\n'
+    else
+      PROMPTS[$reviewer_idx]="$current_prompt"
+      in_prompt=false
+      current_prompt=""
+    fi
+  fi
+done < "$GATE_CONFIG"
+
+# Capture last prompt
+if $in_prompt && [[ $reviewer_idx -ge 0 ]]; then
+  PROMPTS[$reviewer_idx]="$current_prompt"
+fi
+
+TOTAL=$((reviewer_idx + 1))
+echo "Loaded $TOTAL reviewers"
+
+# Run each reviewer
+declare -a VERDICTS FEEDBACK
+AGGREGATE="PASS"
+HAS_CONCERNS=false
+
+for i in $(seq 0 $((TOTAL - 1))); do
+  role="${ROLES[$i]}"
+  model="${MODELS[$i]}"
+  prompt="${PROMPTS[$i]}"
+  required="${REQUIRED[$i]}"
+
+  echo ""
+  echo "--- $role ($model) ---"
+
+  # Select CLI based on model
+  review_prompt="$prompt"$'\n\n'"Review the following spec and provide your verdict (PASS, FAIL, or CONCERNS) on the first line, followed by your detailed feedback:"$'\n\n'"$SPEC_CONTENT"
+
+  case "$model" in
+    claude)
+      result=$(echo "$review_prompt" | claude -p --output-format text 2>&1) || true
+      ;;
+    codex)
+      result=$(codex exec "$review_prompt" 2>&1) || true
+      ;;
+    gemini)
+      result=$(echo "$review_prompt" | gemini 2>&1) || true
+      ;;
+    *)
+      echo " WARNING: Unknown model '$model', skipping"
+      # Record placeholders so the feedback loop below never reads an unset
+      # array slot for a skipped reviewer (fatal under `set -u`).
+      VERDICTS[$i]="SKIPPED"
+      FEEDBACK[$i]="(skipped: unknown model '$model')"
+      continue
+      ;;
+  esac
+
+  # Extract verdict from first line.
+  # Fail closed: the CLI invocations above swallow failures with `|| true`,
+  # so $result may be an error message or empty. An unrecognizable first
+  # line must not silently count as PASS — a missing or broken reviewer
+  # CLI would otherwise approve the gate.
+  first_line=$(echo "$result" | head -1)
+  if echo "$first_line" | grep -qi "FAIL"; then
+    verdict="FAIL"
+  elif echo "$first_line" | grep -qi "CONCERNS"; then
+    verdict="CONCERNS"
+  elif echo "$first_line" | grep -qi "PASS"; then
+    verdict="PASS"
+  else
+    echo " WARNING: Unrecognized verdict from $role ('$first_line'); treating as FAIL" >&2
+    verdict="FAIL"
+  fi
+
+  VERDICTS[$i]="$verdict"
+  FEEDBACK[$i]="$result"
+
+  echo " Verdict: $verdict"
+
+  # Aggregate
+  if [[ "$verdict" == "FAIL" ]] && [[ "$required" == "true" ]]; then
+    AGGREGATE="FAIL"
+  elif [[ "$verdict" == "CONCERNS" ]]; then
+    HAS_CONCERNS=true
+  fi
+done
+
+# Output structured gate result
+echo ""
+echo "=== Gate Result ==="
+
+# JSON gate layer
+gate_json=$(jq -n \
+  --arg aggregate "$AGGREGATE" \
+  --argjson has_concerns "$HAS_CONCERNS" \
+  '{aggregate_verdict:
$aggregate, has_concerns: $has_concerns, requires_human: ($aggregate == "FAIL" or $has_concerns)}') + +echo "$gate_json" | jq . + +# Feedback layer (plain text for agent consumption) +echo "" +echo "=== Reviewer Feedback ===" +for i in $(seq 0 $((TOTAL - 1))); do + echo "" + echo "### ${ROLES[$i]} (${MODELS[$i]}) — ${VERDICTS[$i]}" + echo "${FEEDBACK[$i]}" + echo "" +done + +# Exit code: 0=PASS, 1=FAIL +if [[ "$AGGREGATE" == "FAIL" ]]; then + exit 1 +fi diff --git a/skills/spec-gen/scripts/freeze-and-queue.sh b/skills/spec-gen/scripts/freeze-and-queue.sh new file mode 100755 index 00000000..6c4f5d48 --- /dev/null +++ b/skills/spec-gen/scripts/freeze-and-queue.sh @@ -0,0 +1,1168 @@ +#!/usr/bin/env bash +# freeze-and-queue.sh — Creates parent + sub-issue hierarchy in Linear from a spec +# Decision 32: Linear as spec store — specs live as Linear issues, not filesystem files. +# +# Usage: +# bash freeze-and-queue.sh [--dry-run] [--parent-only] [--allow-empty-scenarios] [--update ISSUE_ID] [--timeout SECS] <workflow-path> [spec-file] +# cat spec.md | bash freeze-and-queue.sh [--dry-run] [--parent-only] <workflow-path> +# bash freeze-and-queue.sh --trivial "Issue title" <workflow-path> +# echo "description" | bash freeze-and-queue.sh --trivial "Issue title" <workflow-path> +# +# The WORKFLOW file provides: project_slug (from YAML frontmatter) +# Auth: Uses LINEAR_API_KEY env var (schpet linear CLI picks it up automatically). +# Team ID is resolved from the project via the Linear API. 
+ +# Relation semantics (Linear GraphQL API): +# issueRelationCreate(input: { issueId: BLOCKER, relatedIssueId: BLOCKED, type: blocks }) +# means: BLOCKER blocks BLOCKED (i.e., BLOCKED is blocked by BLOCKER) +# To verify: query BLOCKER's relations — should have type:"blocks" pointing to BLOCKED + +set -euo pipefail + +# ── Parse flags ────────────────────────────────────────────────────────────── + +DRY_RUN=false +UPDATE_ISSUE_ID="" +PARENT_ONLY=false +TRIVIAL=false +TRIVIAL_TITLE="" +ALLOW_EMPTY_SCENARIOS=false +POSITIONAL=() + +while [[ $# -gt 0 ]]; do + case "$1" in + --dry-run) DRY_RUN=true; shift ;; + --update) shift; UPDATE_ISSUE_ID="${1:-}"; shift ;; + --parent-only) PARENT_ONLY=true; shift ;; + --trivial) TRIVIAL=true; shift; TRIVIAL_TITLE="${1:-}"; shift ;; + --timeout) shift; API_TIMEOUT_ARG="${1:-}"; shift ;; + --allow-empty-scenarios) ALLOW_EMPTY_SCENARIOS=true; shift ;; + *) POSITIONAL+=("$1"); shift ;; + esac +done + +# Set API timeout from --timeout flag, env var, or default (30s) +API_TIMEOUT="${API_TIMEOUT_ARG:-${API_TIMEOUT:-30}}" + +WORKFLOW_PATH="${POSITIONAL[0]:-}" +SPEC_FILE="${POSITIONAL[1]:-}" + +if [[ -z "$WORKFLOW_PATH" ]]; then + echo "Usage: freeze-and-queue.sh [--dry-run] [--parent-only] [--update ISSUE_ID] [--trivial TITLE] [--timeout SECS] <workflow-path> [spec-file]" >&2 + echo " --trivial TITLE Create a single issue in Todo state (no spec, no parent/sub-issue hierarchy)" >&2 + echo " --timeout SECS API call timeout in seconds (default: 30, env: API_TIMEOUT)" >&2 + echo " If no spec-file is given, reads spec content from stdin." >&2 + exit 1 +fi + +if [[ ! -f "$WORKFLOW_PATH" ]]; then + echo "ERROR: WORKFLOW file not found: $WORKFLOW_PATH" >&2 + exit 1 +fi + +# ── Portable timeout wrapper ────────────────────────────────────────────────── +# macOS has no `timeout` command. Uses perl's alarm() signal which works on all +# POSIX systems. Args: $1=label (for error messages), remaining args=command. 
+ +run_with_timeout() { + local label="$1"; shift + local output + if output=$(perl -e "alarm($API_TIMEOUT); exec(@ARGV)" -- "$@" 2>&1); then + echo "$output" + return 0 + else + local exit_code=$? + if [[ $exit_code -eq 142 ]]; then + echo "ERROR: Timed out after ${API_TIMEOUT}s during: $label" >&2 + echo " Re-run the script or increase timeout with --timeout <seconds>" >&2 + exit 1 + else + echo "$output" + return $exit_code + fi + fi +} + +# ── Linear CLI helpers ─────────────────────────────────────────────────────── +# All Linear operations use schpet linear CLI (binary: "linear"), which handles +# auth via LINEAR_API_KEY env var. + +LINEAR_CLI="linear" + +# ── Resolve team from project ──────────────────────────────────────────────── + +resolve_team_from_project() { + # Single GraphQL query to resolve both project ID and team info from slugId + local project_json + project_json=$(run_with_timeout "resolving project and team" $LINEAR_CLI api \ + --variable "slug=$PROJECT_SLUG" \ + 'query($slug: String!) { projects(filter: { slugId: { eq: $slug } }) { nodes { id teams { nodes { id key } } } } }') + + PROJECT_ID=$(echo "$project_json" | jq -r '.data.projects.nodes[0].id // empty') + if [[ -z "$PROJECT_ID" ]]; then + echo "ERROR: Could not find project with slugId: $PROJECT_SLUG" >&2 + echo " Ensure the project exists and LINEAR_API_KEY is set." 
>&2 + exit 1 + fi + echo "Project ID: $PROJECT_ID" + + TEAM_ID=$(echo "$project_json" | jq -r '.data.projects.nodes[0].teams.nodes[0].id // empty') + TEAM_KEY=$(echo "$project_json" | jq -r '.data.projects.nodes[0].teams.nodes[0].key // empty') + + if [[ -z "$TEAM_ID" ]]; then + echo "ERROR: Could not resolve team from project: $PROJECT_ID" >&2 + echo " API response: $project_json" >&2 + exit 1 + fi + echo "Resolved team: $TEAM_KEY (ID: $TEAM_ID)" +} + +# ── Resolve workflow state IDs for the team ────────────────────────────────── +# Globals populated by resolve_all_states(): +DRAFT_STATE_ID="" +TODO_STATE_ID="" +BACKLOG_STATE_ID="" + +resolve_all_states() { + # Single workflowStates GraphQL query to batch-resolve all needed state IDs + local states_json + states_json=$(run_with_timeout "resolving workflow states" $LINEAR_CLI api \ + --variable "teamId=$TEAM_ID" \ + 'query($teamId: ID!) { workflowStates(filter: { team: { id: { eq: $teamId } } }) { nodes { id name } } }') + + DRAFT_STATE_ID=$(echo "$states_json" | jq -r '.data.workflowStates.nodes[] | select(.name == "Draft") | .id' | head -1) + TODO_STATE_ID=$(echo "$states_json" | jq -r '.data.workflowStates.nodes[] | select(.name == "Todo") | .id' | head -1) + BACKLOG_STATE_ID=$(echo "$states_json" | jq -r '.data.workflowStates.nodes[] | select(.name == "Backlog") | .id' | head -1) +} + +# ── Helper functions ────────────────────────────────────────────────────────── + +# Helper: create a blocks relation via high-level linear CLI command. 
+# Args: $1=blocker_uuid $2=blocked_uuid $3=blocker_ident $4=blocked_ident $5=reason +create_blocks_relation() { + local blocker_uuid="$1" blocked_uuid="$2" + local blocker_ident="$3" blocked_ident="$4" reason="$5" + + local result + if result=$(run_with_timeout "creating blocking relation" $LINEAR_CLI issue relation add "$blocker_ident" blocks "$blocked_ident" 2>&1); then + echo " $blocked_ident blocked by $blocker_ident ($reason)" + return 0 + fi + echo " WARNING: Failed to create relation $blocker_ident blocks $blocked_ident" >&2 + echo " Response: ${result:-<empty>}" >&2 + return 1 +} + +# Verify that a blocking relation was created with the correct direction. +# Args: $1=blocker_uuid $2=blocked_uuid $3=blocker_ident $4=blocked_ident +verify_blocking_relation() { + local blocker_uuid="$1" blocked_uuid="$2" + local blocker_ident="$3" blocked_ident="$4" + + if [[ "$DRY_RUN" == true ]]; then + return 0 + fi + + local verify_result + verify_result=$(run_with_timeout "verifying blocking relation" $LINEAR_CLI api \ + --variable "issueId=$blocker_uuid" \ + 'query($issueId: String!) { issue(id: $issueId) { relations { nodes { type relatedIssue { id } } } } }' 2>/dev/null) || true + + local found + found=$(echo "$verify_result" | jq -r --arg blocked "$blocked_uuid" \ + '.data.issue.relations.nodes[] | select(.type == "blocks" and .relatedIssue.id == $blocked) | .type' 2>/dev/null) || true + + if [[ "$found" == "blocks" ]]; then + echo " Verified: $blocker_ident blocks $blocked_ident" + return 0 + else + echo " WARNING: Could not verify relation $blocker_ident blocks $blocked_ident" >&2 + echo " Manual fix: linear issue relation add $blocker_ident blocks $blocked_ident" >&2 + return 1 + fi +} + +# Post-creation verification — confirms project.slugId and parent.id match expected. 
+# Args: $1=issue_uuid, $2=expected_project_slug, $3=expected_parent_id (optional) +verify_issue_creation() { + local issue_uuid="$1" + local expected_slug="$2" + local expected_parent_id="${3:-}" + + if [[ "$DRY_RUN" == true ]]; then + return 0 + fi + + local verify_result + verify_result=$(run_with_timeout "verifying issue creation" $LINEAR_CLI api \ + --variable "issueId=$issue_uuid" \ + 'query($issueId: String!) { issue(id: $issueId) { project { slugId } parent { id } } }') || true + + local actual_slug + actual_slug=$(echo "$verify_result" | jq -r '.data.issue.project.slugId // empty') + if [[ -n "$actual_slug" && "$actual_slug" != "$expected_slug" ]]; then + echo "WARNING: project mismatch on $issue_uuid — expected slugId=$expected_slug, got $actual_slug" >&2 + elif [[ -z "$actual_slug" ]]; then + echo "WARNING: VERIFY FAIL — could not confirm project.slugId for $issue_uuid" >&2 + fi + + if [[ -n "$expected_parent_id" ]]; then + local actual_parent + actual_parent=$(echo "$verify_result" | jq -r '.data.issue.parent.id // empty') + if [[ -n "$actual_parent" && "$actual_parent" != "$expected_parent_id" ]]; then + echo "WARNING: parent mismatch on $issue_uuid — expected parent=$expected_parent_id, got $actual_parent" >&2 + elif [[ -z "$actual_parent" ]]; then + echo "WARNING: VERIFY FAIL — could not confirm parent.id for $issue_uuid" >&2 + fi + fi +} + +# ── Trivial mode: single issue in Todo, no spec ───────────────────────────── + +if [[ "$TRIVIAL" == true ]]; then + if [[ -z "$TRIVIAL_TITLE" ]]; then + echo "ERROR: --trivial requires a title argument." >&2 + echo " Usage: freeze-and-queue.sh --trivial 'Fix the typo in README' <workflow-path>" >&2 + exit 1 + fi + + # Read optional description from stdin or spec file + TRIVIAL_DESC="" + if [[ -n "$SPEC_FILE" && -f "$SPEC_FILE" ]]; then + TRIVIAL_DESC=$(cat "$SPEC_FILE") + elif [[ ! 
-t 0 ]]; then + TRIVIAL_DESC=$(cat) + fi + + # Parse WORKFLOW for project_slug + FRONTMATTER=$(sed -n '/^---$/,/^---$/p' "$WORKFLOW_PATH" | sed '1d;$d') + PROJECT_SLUG=$(echo "$FRONTMATTER" | grep 'project_slug:' | head -1 | sed 's/.*project_slug:[[:space:]]*//' | tr -d '"'"'" | xargs) + + if [[ -z "$PROJECT_SLUG" ]]; then + echo "ERROR: No project_slug found in WORKFLOW file: $WORKFLOW_PATH" >&2 + exit 1 + fi + + echo "=== freeze-and-queue.sh (trivial) ===" + echo "Title: $TRIVIAL_TITLE" + echo "WORKFLOW: $WORKFLOW_PATH" + echo "Project slug: $PROJECT_SLUG" + echo "Dry run: $DRY_RUN" + + if [[ "$DRY_RUN" == true ]]; then + echo "" + echo "--- TRIVIAL ISSUE ---" + echo "Title: $TRIVIAL_TITLE" + echo "State: Todo" + echo "Description: ${TRIVIAL_DESC:-(none)}" + echo "" + echo "=== Dry run complete: 1 trivial issue would be created ===" + exit 0 + fi + + # Resolve team from project + resolve_team_from_project + + # Resolve all states in one batch query + resolve_all_states + TODO_STATE_NAME="Todo" + if [[ -z "$TODO_STATE_ID" ]]; then + echo "WARNING: 'Todo' state not found. Falling back to 'Backlog'..." >&2 + TODO_STATE_ID="$BACKLOG_STATE_ID" + TODO_STATE_NAME="Backlog" + fi + + # Create issue via GraphQL — includes projectId and stateId at creation time + TRIVIAL_GQL_TMPFILE=$(mktemp) + trap 'rm -f "$TRIVIAL_GQL_TMPFILE"' EXIT + if [[ -n "$TRIVIAL_DESC" ]]; then + cat > "$TRIVIAL_GQL_TMPFILE" <<'GQLEOF' +mutation($title: String!, $description: String, $teamId: String!, $stateId: String!, $projectId: String!) 
{ + issueCreate(input: { + teamId: $teamId + title: $title + description: $description + stateId: $stateId + projectId: $projectId + }) { + success + issue { id identifier url } + } +} +GQLEOF + result=$(run_with_timeout "creating trivial issue (with description)" $LINEAR_CLI api \ + --variable "title=$TRIVIAL_TITLE" \ + --variable "description=$TRIVIAL_DESC" \ + --variable "teamId=$TEAM_ID" \ + --variable "stateId=$TODO_STATE_ID" \ + --variable "projectId=$PROJECT_ID" \ + < "$TRIVIAL_GQL_TMPFILE") + else + cat > "$TRIVIAL_GQL_TMPFILE" <<'GQLEOF' +mutation($title: String!, $teamId: String!, $stateId: String!, $projectId: String!) { + issueCreate(input: { + teamId: $teamId + title: $title + stateId: $stateId + projectId: $projectId + }) { + success + issue { id identifier url } + } +} +GQLEOF + result=$(run_with_timeout "creating trivial issue" $LINEAR_CLI api \ + --variable "title=$TRIVIAL_TITLE" \ + --variable "teamId=$TEAM_ID" \ + --variable "stateId=$TODO_STATE_ID" \ + --variable "projectId=$PROJECT_ID" \ + < "$TRIVIAL_GQL_TMPFILE") + fi + rm -f "$TRIVIAL_GQL_TMPFILE" + + identifier=$(echo "$result" | jq -r '.data.issueCreate.issue.identifier // empty') + url=$(echo "$result" | jq -r '.data.issueCreate.issue.url // empty') + issue_id=$(echo "$result" | jq -r '.data.issueCreate.issue.id // empty') + success=$(echo "$result" | jq -r '.data.issueCreate.success // false') + + if [[ "$success" == "true" && -n "$identifier" ]]; then + verify_issue_creation "$issue_id" "$PROJECT_SLUG" + echo "" + echo "=== Done (trivial) ===" + echo "Issue: $identifier ($url)" + echo "State: $TODO_STATE_NAME" + echo "" + echo "Symphony-ts will pick up this issue automatically when the pipeline runs." + else + echo "FAILED to create trivial issue" >&2 + echo "Response: $result" >&2 + exit 1 + fi + exit 0 +fi + +# ── Read spec content ──────────────────────────────────────────────────────── + +if [[ -n "$SPEC_FILE" ]]; then + if [[ ! 
-f "$SPEC_FILE" ]]; then + echo "ERROR: Spec file not found: $SPEC_FILE" >&2 + exit 1 + fi + SPEC_CONTENT=$(cat "$SPEC_FILE") +elif [[ ! -t 0 ]]; then + SPEC_CONTENT=$(cat) +else + echo "ERROR: No spec file provided and stdin is a terminal." >&2 + echo " Provide a spec file or pipe spec content to stdin." >&2 + exit 1 +fi + +if [[ -z "$SPEC_CONTENT" ]]; then + echo "ERROR: Spec content is empty." >&2 + exit 1 +fi + +# ── Parse WORKFLOW config ──────────────────────────────────────────────────── + +# Extract YAML frontmatter between --- markers +FRONTMATTER=$(sed -n '/^---$/,/^---$/p' "$WORKFLOW_PATH" | sed '1d;$d') + +# Extract project_slug from frontmatter +PROJECT_SLUG=$(echo "$FRONTMATTER" | grep 'project_slug:' | head -1 | sed 's/.*project_slug:[[:space:]]*//' | tr -d '"'"'" | xargs) + +if [[ -z "$PROJECT_SLUG" ]]; then + echo "ERROR: No project_slug found in WORKFLOW file: $WORKFLOW_PATH" >&2 + exit 1 +fi + +echo "=== freeze-and-queue.sh ===" +echo "WORKFLOW: $WORKFLOW_PATH" +echo "Project slug: $PROJECT_SLUG" +echo "Dry run: $DRY_RUN" +echo "Parent only: $PARENT_ONLY" +[[ -n "$UPDATE_ISSUE_ID" ]] && echo "Update mode: $UPDATE_ISSUE_ID" + +# ── Parse tasks from spec content ──────────────────────────────────────────── + +# Extract title from first # heading +SPEC_TITLE=$(echo "$SPEC_CONTENT" | grep -m1 '^# ' | sed 's/^# //') +if [[ -z "$SPEC_TITLE" ]]; then + SPEC_TITLE="Spec $(date +%Y-%m-%d)" +fi + +# Parse ## Task N: headers and collect each task's content +declare -a TASK_TITLES TASK_BODIES TASK_SCOPES +task_idx=-1 +current_body="" +current_scope="" + +while IFS= read -r line; do + if [[ "$line" =~ ^#{2,3}\ Task\ [0-9]+:\ (.+)$ ]] || [[ "$line" =~ ^#{2,3}\ Task\ [0-9]+\ -\ (.+)$ ]] || [[ "$line" =~ ^#{2,3}\ Task\ [0-9]+\.\ (.+)$ ]]; then + # Save previous task + if [[ $task_idx -ge 0 ]]; then + TASK_BODIES[$task_idx]="$current_body" + TASK_SCOPES[$task_idx]="$current_scope" + fi + ((task_idx++)) + TASK_TITLES[$task_idx]="${BASH_REMATCH[1]}" + 
current_body="" + current_scope="" + elif [[ $task_idx -ge 0 ]]; then + # Accumulate body lines + current_body+="$line"$'\n' + # Extract scope from **Scope**: lines + if [[ "$line" =~ ^\*\*Scope\*\*:\ (.+)$ ]]; then + current_scope="${BASH_REMATCH[1]}" + fi + fi +done <<< "$SPEC_CONTENT" + +# Save last task +if [[ $task_idx -ge 0 ]]; then + TASK_BODIES[$task_idx]="$current_body" + TASK_SCOPES[$task_idx]="$current_scope" +fi + +TOTAL=$((task_idx + 1)) +echo "" +echo "Spec title: $SPEC_TITLE" +echo "Found $TOTAL tasks" + +if [[ $TOTAL -eq 0 ]]; then + echo "WARNING: No tasks found. Expected ## Task N: headers in spec content." >&2 + echo "Parent issue will be created without sub-issues." >&2 +fi + +# ── Detect file-path overlap for blockedBy relations ───────────────────────── + +detect_overlap() { + local scope_a="$1" scope_b="$2" + [[ -z "$scope_a" || -z "$scope_b" ]] && return 1 + IFS=', ' read -ra files_a <<< "$scope_a" + IFS=', ' read -ra files_b <<< "$scope_b" + for fa in "${files_a[@]}"; do + for fb in "${files_b[@]}"; do + fa_clean=$(echo "$fa" | sed 's/`//g' | xargs) + fb_clean=$(echo "$fb" | sed 's/`//g' | xargs) + [[ -z "$fa_clean" || -z "$fb_clean" ]] && continue + if [[ "$fa_clean" == "$fb_clean" ]] || \ + [[ "$fa_clean" == "$fb_clean"/* ]] || \ + [[ "$fb_clean" == "$fa_clean"/* ]]; then + return 0 + fi + done + done + return 1 +} + +# ── Parse task priorities for sequential ordering ──────────────────────────── + +declare -a TASK_PRIORITIES +for ((i=0; i<TOTAL; i++)); do + pri=$(echo "${TASK_BODIES[$i]}" | grep -oE '\*\*Priority\*\*:[[:space:]]*[0-9]+' | grep -oE '[0-9]+' | head -1 || true) + TASK_PRIORITIES[$i]="${pri:-$((i+1))}" +done + +# Build priority-sorted index array (stable sort by priority, preserving task order for ties) +SORTED_INDICES=() +for ((i=0; i<TOTAL; i++)); do + SORTED_INDICES+=("$i") +done + +# Bubble sort by priority (stable — preserves original order for equal priorities) +for ((i=0; i<TOTAL; i++)); do + for ((j=0; j<TOTAL-i-1; 
j++)); do + idx_a="${SORTED_INDICES[$j]}" + idx_b="${SORTED_INDICES[$((j+1))]}" + if (( TASK_PRIORITIES[idx_a] > TASK_PRIORITIES[idx_b] )); then + SORTED_INDICES[$j]="$idx_b" + SORTED_INDICES[$((j+1))]="$idx_a" + fi + done +done + +# ── Parse Scenarios section from parent spec ───────────────────────────────── + +# Extract the full Scenarios section (from "## Scenarios" until the next ## heading) +SCENARIOS_SECTION="" +in_scenarios=false +while IFS= read -r line; do + if [[ "$line" =~ ^##\ Scenarios ]]; then + in_scenarios=true + continue + elif [[ "$in_scenarios" == true && "$line" =~ ^##\ && ! "$line" =~ ^###\ ]]; then + break + fi + if [[ "$in_scenarios" == true ]]; then + SCENARIOS_SECTION+="$line"$'\n' + fi +done <<< "$SPEC_CONTENT" + +# Parse individual scenarios from the Scenarios section +# Each scenario starts with "Scenario:" (possibly inside a gherkin block) and ends +# before the next "Scenario:" or the end of the section. +# Feature headings (### Feature: <name>) group scenarios for feature-level matching. 
+declare -a SCENARIO_NAMES SCENARIO_BODIES SCENARIO_FEATURES +scenario_idx=-1 +current_scenario_body="" +current_scenario_name="" +current_feature="" + +while IFS= read -r line; do + # Track Feature headings for feature-level grouping + if [[ "$line" =~ ^###[[:space:]]+Feature:[[:space:]]*(.+)$ ]]; then + current_feature="${BASH_REMATCH[1]}" + continue + fi + if [[ "$line" =~ ^[[:space:]]*Scenario:[[:space:]]*(.+)$ ]]; then + # Save previous scenario + if [[ $scenario_idx -ge 0 ]]; then + SCENARIO_BODIES[$scenario_idx]="$current_scenario_body" + fi + ((scenario_idx++)) + current_scenario_name="${BASH_REMATCH[1]}" + SCENARIO_NAMES[$scenario_idx]="$current_scenario_name" + SCENARIO_FEATURES[$scenario_idx]="$current_feature" + current_scenario_body="$line"$'\n' + elif [[ $scenario_idx -ge 0 ]]; then + # Skip gherkin code fence markers (``` lines) + if [[ "$line" =~ ^[[:space:]]*\`\`\` ]]; then + continue + fi + current_scenario_body+="$line"$'\n' + fi +done <<< "$SCENARIOS_SECTION" + +# Save last scenario +if [[ $scenario_idx -ge 0 ]]; then + SCENARIO_BODIES[$scenario_idx]="$current_scenario_body" +fi + +TOTAL_SCENARIOS=$((scenario_idx + 1)) +echo "Found $TOTAL_SCENARIOS scenarios in spec" + +# ── Parse Boundaries section from parent spec ──────────────────────────────── + +BOUNDARIES_SECTION="" +in_boundaries=false +while IFS= read -r line; do + if [[ "$line" =~ ^##\ Boundaries ]]; then + in_boundaries=true + BOUNDARIES_SECTION+="## Boundaries"$'\n' + continue + elif [[ "$in_boundaries" == true && "$line" =~ ^##\ && ! 
"$line" =~ ^###\ ]]; then + break + fi + if [[ "$in_boundaries" == true ]]; then + BOUNDARIES_SECTION+="$line"$'\n' + fi +done <<< "$SPEC_CONTENT" + +# ── Parse task scenario references ─────────────────────────────────────────── + +declare -a TASK_SCENARIO_REFS +for ((i=0; i<TOTAL; i++)); do + ref=$(echo "${TASK_BODIES[$i]}" | grep -oE '\*\*Scenarios\*\*:[[:space:]]*(.+)' | sed 's/\*\*Scenarios\*\*:[[:space:]]*//' | head -1 || true) + TASK_SCENARIO_REFS[$i]="${ref:-}" +done + +# ── Build sub-issue bodies with inlined Gherkin + verify lines ─────────────── + +match_scenario_to_task() { + local scenario_name="$1" + local task_ref="$2" + local scenario_feature="${3:-}" + + # "All" matches everything + if [[ "$task_ref" == "All" || "$task_ref" == "all" ]]; then + return 0 + fi + + # Check if the scenario name appears in the comma-separated task ref list + IFS=',' read -ra refs <<< "$task_ref" + for ref in "${refs[@]}"; do + ref_clean=$(echo "$ref" | xargs) # trim whitespace + + # Direct scenario name match (existing behavior) + if [[ "$scenario_name" == *"$ref_clean"* || "$ref_clean" == *"$scenario_name"* ]]; then + return 0 + fi + + # Feature-level match: ref like "<Feature Name> scenarios" matches all scenarios under that Feature + if [[ -n "$scenario_feature" && "$ref_clean" =~ ^(.+)[[:space:]]+(scenarios|Scenarios)$ ]]; then + local feature_ref="${BASH_REMATCH[1]}" + if [[ "$scenario_feature" == "$feature_ref" ]]; then + return 0 + fi + fi + done + return 1 +} + +build_sub_issue_body() { + local idx=$1 + local body="${TASK_BODIES[$idx]}" + local task_ref="${TASK_SCENARIO_REFS[$idx]:-}" + local parent_ref="${PARENT_REF_LINE:-}" + + local output="" + + # F3: Parent reference at top of sub-issue body + if [[ -n "$parent_ref" ]]; then + output+="$parent_ref"$'\n'$'\n' + fi + + output+="## Task Scope"$'\n' + output+="$body"$'\n' + + # Add matched scenarios + if [[ -n "$task_ref" && $TOTAL_SCENARIOS -gt 0 ]]; then + output+="## Scenarios"$'\n'$'\n' + local matched=0 + 
for ((s=0; s<TOTAL_SCENARIOS; s++)); do
+      if match_scenario_to_task "${SCENARIO_NAMES[$s]}" "$task_ref" "${SCENARIO_FEATURES[$s]:-}"; then
+        output+="${SCENARIO_BODIES[$s]}"$'\n'
+        # Plain assignment instead of ((matched++)): under `set -e` a
+        # post-increment from 0 evaluates to 0, the (( )) command returns
+        # non-zero, and the script dies on the first matched scenario.
+        matched=$((matched + 1))
+      fi
+    done
+    if [[ $matched -eq 0 ]]; then
+      output+="_No matching scenarios found for: ${task_ref}_"$'\n'
+    fi
+    output+=$'\n'
+  fi
+
+  # Add boundaries section
+  if [[ -n "$BOUNDARIES_SECTION" ]]; then
+    output+="$BOUNDARIES_SECTION"$'\n'
+  fi
+
+  output+="---"$'\n'
+  output+="_Created by freeze-and-queue.sh from parent spec. Implement exactly what is specified._"
+  echo "$output"
+}
+
+# ── F2: Validate that all tasks have matching scenarios (hard gate) ────────
+# Checks each task's scenario ref against the parsed scenarios. If any task has
+# a non-empty ref that matches zero scenarios, the script fails unless
+# --allow-empty-scenarios is passed.
+
+if [[ $TOTAL -gt 0 && $TOTAL_SCENARIOS -gt 0 ]]; then
+  empty_tasks=()
+  for ((i=0; i<TOTAL; i++)); do
+    task_ref="${TASK_SCENARIO_REFS[$i]:-}"
+    if [[ -z "$task_ref" ]]; then
+      continue
+    fi
+    matched=0
+    for ((s=0; s<TOTAL_SCENARIOS; s++)); do
+      if match_scenario_to_task "${SCENARIO_NAMES[$s]}" "$task_ref" "${SCENARIO_FEATURES[$s]:-}"; then
+        # Same `set -e` pitfall as above: avoid ((matched++)) from 0.
+        matched=$((matched + 1))
+      fi
+    done
+    if [[ $matched -eq 0 ]]; then
+      empty_tasks+=("${TASK_TITLES[$i]} (ref: $task_ref)")
+    fi
+  done
+
+  if [[ ${#empty_tasks[@]} -gt 0 ]]; then
+    if [[ "$ALLOW_EMPTY_SCENARIOS" == true ]]; then
+      echo "WARNING: No matching scenarios for ${#empty_tasks[@]} task(s):" >&2
+      for t in "${empty_tasks[@]}"; do
+        echo " - $t" >&2
+      done
+    else
+      echo "ERROR: No matching scenarios for ${#empty_tasks[@]} task(s):" >&2
+      for t in "${empty_tasks[@]}"; do
+        echo " - $t" >&2
+      done
+      echo "" >&2
+      echo "Fix the **Scenarios** refs in the spec, or re-run with --allow-empty-scenarios to bypass."
>&2
+      exit 1
+    fi
+  fi
+fi
+
+# ── Execute: Create or update parent, create sub-issues ──────────────────────
+
+if [[ "$DRY_RUN" == true ]]; then
+  echo ""
+  echo "═══════════════════════════════════════════════════════════════"
+  echo " DRY RUN — No Linear API calls will be made"
+  echo "═══════════════════════════════════════════════════════════════"
+  echo ""
+
+  echo "--- PARENT ISSUE ---"
+  echo "Title: [Spec] $SPEC_TITLE"
+  echo "State: Draft (fallback: Backlog)"
+  echo "Description: (full spec content, ${#SPEC_CONTENT} chars)"
+  echo ""
+
+  if [[ "$PARENT_ONLY" == true ]]; then
+    echo "=== Dry run complete (--parent-only): 1 parent issue would be created ==="
+    exit 0
+  fi
+
+  # F3: Set parent reference for sub-issue bodies (dry-run uses spec title as placeholder)
+  PARENT_REF_LINE="Parent spec: [Spec] $SPEC_TITLE"
+
+  echo "--- PHASE 1: Create sub-issues (WITHOUT project — invisible to symphony-ts) ---"
+  echo ""
+  relation_count=0
+  for ((k=0; k<TOTAL; k++)); do
+    i="${SORTED_INDICES[$k]}"
+    echo " SUB-ISSUE $((i+1)): ${TASK_TITLES[$i]}"
+    echo " Priority: ${TASK_PRIORITIES[$i]}"
+    echo " State: Todo"
+    echo " Project: (deferred — assigned after relations)"
+    echo " Scope: ${TASK_SCOPES[$i]:-<none>}"
+    echo " Scenarios ref: ${TASK_SCENARIO_REFS[$i]:-<none>}"
+    sub_body=$(build_sub_issue_body "$i")
+    echo " Body:"
+    echo "$sub_body" | sed 's/^/ /'
+    # Show sequential blocking relation immediately after this sub-issue
+    if [[ $k -gt 0 ]]; then
+      blocker_idx="${SORTED_INDICES[$((k-1))]}"
+      echo " → blocked by Task $((blocker_idx+1)) (${TASK_TITLES[$blocker_idx]})"
+      # Plain assignment instead of ((relation_count++)): under `set -e` a
+      # post-increment from 0 returns non-zero and aborts the dry run at
+      # the first relation (k=1). Phase 2 below already uses `|| true` for
+      # the same reason.
+      relation_count=$((relation_count + 1))
+    fi
+    echo ""
+  done
+
+  echo "--- PHASE 2: Add blocking relations ---"
+  echo ""
+  echo " Sequential chain: $((TOTAL > 1 ?
TOTAL - 1 : 0)) relations" + + # Additional file-overlap relations (second pass, only those not already covered by sequential chain) + overlap_count=0 + for ((i=0; i<TOTAL; i++)); do + for ((j=i+1; j<TOTAL; j++)); do + if detect_overlap "${TASK_SCOPES[$i]:-}" "${TASK_SCOPES[$j]:-}"; then + # Check if this pair is already covered by sequential chain + already_covered=false + for ((k=0; k<TOTAL-1; k++)); do + si="${SORTED_INDICES[$k]}" + si_next="${SORTED_INDICES[$((k+1))]}" + if [[ "$si" == "$i" && "$si_next" == "$j" ]] || [[ "$si" == "$j" && "$si_next" == "$i" ]]; then + already_covered=true + break + fi + done + if [[ "$already_covered" == false ]]; then + echo " File overlap: Task $((j+1)) (${TASK_TITLES[$j]}) blocked by Task $((i+1)) (${TASK_TITLES[$i]})" + ((relation_count++)) || true + ((overlap_count++)) || true + fi + fi + done + done + [[ $overlap_count -eq 0 ]] && echo " File overlap: (none)" + + echo "" + echo "--- PHASE 3: Assign project to all sub-issues (now visible to symphony-ts) ---" + echo "" + echo " $TOTAL sub-issues → Pipeline project ($PROJECT_SLUG)" + echo " (Relations are in place — safe to dispatch)" + + echo "" + echo "--- PHASE 4: Transition parent to Backlog ---" + echo "" + echo " $SPEC_TITLE → Backlog" + + echo "" + echo "=== Dry run complete: 1 parent + $TOTAL sub-issues + $relation_count relations + deferred project assignment ===" + exit 0 +fi + +# ── Live mode: resolve Linear config ───────────────────────────────────────── + +resolve_team_from_project + +# Resolve all workflow states in a single batch query +resolve_all_states + +# Parent issue → Draft state (fallback to Backlog) +DRAFT_STATE_NAME="" +if [[ -n "$DRAFT_STATE_ID" ]]; then + DRAFT_STATE_NAME="Draft" +elif [[ -n "$BACKLOG_STATE_ID" ]]; then + DRAFT_STATE_ID="$BACKLOG_STATE_ID" + DRAFT_STATE_NAME="Backlog" + echo "WARNING: 'Draft' state not found for team. Falling back to 'Backlog'..." >&2 +else + echo "WARNING: Neither 'Draft' nor 'Backlog' state found. 
Parent issue will use default state." >&2 +fi +echo "Draft state: ${DRAFT_STATE_NAME:-<default>} (ID: ${DRAFT_STATE_ID:-<default>})" + +# Sub-issues → Todo state (always) +TODO_STATE_NAME="" +if [[ -n "$TODO_STATE_ID" ]]; then + TODO_STATE_NAME="Todo" +else + echo "WARNING: 'Todo' state not found for team. Sub-issues will use default state." >&2 +fi +echo "Todo state: ${TODO_STATE_NAME:-<default>} (ID: ${TODO_STATE_ID:-<default>})" + +# ── Create or update parent issue ──────────────────────────────────────────── + +# Write spec content to temp file for stdin piping (avoids arg length limits) +SPEC_TMPFILE=$(mktemp) +GQL_TMPFILE="" +trap 'rm -f "$SPEC_TMPFILE" ${GQL_TMPFILE:+"$GQL_TMPFILE"}' EXIT +echo "$SPEC_CONTENT" > "$SPEC_TMPFILE" + +if [[ -n "$UPDATE_ISSUE_ID" ]]; then + echo "" + echo "Updating existing parent issue: $UPDATE_ISSUE_ID" + + # Build issueUpdate mutation via temp file (title/description are user-provided strings) + GQL_TMPFILE=$(mktemp) + if [[ -n "$DRAFT_STATE_ID" ]]; then + cat > "$GQL_TMPFILE" <<'GQLEOF' +mutation($issueId: String!, $title: String!, $description: String!, $stateId: String!) { + issueUpdate(id: $issueId, input: { + title: $title + description: $description + stateId: $stateId + }) { + success + issue { id identifier url } + } +} +GQLEOF + result=$(run_with_timeout "updating parent issue (with state)" $LINEAR_CLI api \ + --variable "issueId=$UPDATE_ISSUE_ID" \ + --variable "title=[Spec] $SPEC_TITLE" \ + --variable "description=@$SPEC_TMPFILE" \ + --variable "stateId=$DRAFT_STATE_ID" \ + < "$GQL_TMPFILE") + else + cat > "$GQL_TMPFILE" <<'GQLEOF' +mutation($issueId: String!, $title: String!, $description: String!) 
{ + issueUpdate(id: $issueId, input: { + title: $title + description: $description + }) { + success + issue { id identifier url } + } +} +GQLEOF + result=$(run_with_timeout "updating parent issue" $LINEAR_CLI api \ + --variable "issueId=$UPDATE_ISSUE_ID" \ + --variable "title=[Spec] $SPEC_TITLE" \ + --variable "description=@$SPEC_TMPFILE" \ + < "$GQL_TMPFILE") + fi + rm -f "$GQL_TMPFILE"; GQL_TMPFILE="" + + success=$(echo "$result" | jq -r '.data.issueUpdate.success // false') + PARENT_ID=$(echo "$result" | jq -r '.data.issueUpdate.issue.id // empty') + parent_identifier=$(echo "$result" | jq -r '.data.issueUpdate.issue.identifier // empty') + parent_url=$(echo "$result" | jq -r '.data.issueUpdate.issue.url // empty') + PARENT_IDENTIFIER="$parent_identifier" + + if [[ "$success" == "true" && -n "$parent_identifier" ]]; then + echo " Updated: $parent_identifier ($parent_url)" + verify_issue_creation "$PARENT_ID" "$PROJECT_SLUG" + else + echo " FAILED to update parent issue" >&2 + echo " Response: $result" >&2 + exit 1 + fi +else + echo "" + echo "Creating parent issue..." + + # Spec parent: issueCreate mutation via temp file (title/description are user-provided strings) + # Includes projectId at creation time (eliminates separate issues update --project call) + GQL_TMPFILE=$(mktemp) + if [[ -n "$DRAFT_STATE_ID" ]]; then + cat > "$GQL_TMPFILE" <<'GQLEOF' +mutation($title: String!, $description: String!, $teamId: String!, $projectId: String!, $stateId: String!) 
{ + issueCreate(input: { + title: $title + description: $description + teamId: $teamId + projectId: $projectId + stateId: $stateId + }) { + success + issue { id identifier url } + } +} +GQLEOF + result=$(run_with_timeout "creating parent issue (with state)" $LINEAR_CLI api \ + --variable "title=[Spec] $SPEC_TITLE" \ + --variable "description=@$SPEC_TMPFILE" \ + --variable "teamId=$TEAM_ID" \ + --variable "projectId=$PROJECT_ID" \ + --variable "stateId=$DRAFT_STATE_ID" \ + < "$GQL_TMPFILE") + else + cat > "$GQL_TMPFILE" <<'GQLEOF' +mutation($title: String!, $description: String!, $teamId: String!, $projectId: String!) { + issueCreate(input: { + title: $title + description: $description + teamId: $teamId + projectId: $projectId + }) { + success + issue { id identifier url } + } +} +GQLEOF + result=$(run_with_timeout "creating parent issue" $LINEAR_CLI api \ + --variable "title=[Spec] $SPEC_TITLE" \ + --variable "description=@$SPEC_TMPFILE" \ + --variable "teamId=$TEAM_ID" \ + --variable "projectId=$PROJECT_ID" \ + < "$GQL_TMPFILE") + fi + rm -f "$GQL_TMPFILE"; GQL_TMPFILE="" + + success=$(echo "$result" | jq -r '.data.issueCreate.success // false') + PARENT_ID=$(echo "$result" | jq -r '.data.issueCreate.issue.id // empty') + parent_identifier=$(echo "$result" | jq -r '.data.issueCreate.issue.identifier // empty') + parent_url=$(echo "$result" | jq -r '.data.issueCreate.issue.url // empty') + PARENT_IDENTIFIER="$parent_identifier" + + if [[ "$success" == "true" && -n "$parent_identifier" && -n "$PARENT_ID" ]]; then + echo " Created parent: $parent_identifier ($parent_url)" + verify_issue_creation "$PARENT_ID" "$PROJECT_SLUG" + else + echo " FAILED to create parent issue" >&2 + echo " Response: $result" >&2 + exit 1 + fi +fi + +# ── Parent-only mode: exit after parent creation ───────────────────────────── + +if [[ "$PARENT_ONLY" == true ]]; then + echo "" + echo "=== Done (--parent-only) ===" + echo "Parent: $PARENT_IDENTIFIER ($parent_url)" + echo "" + echo "Run 
again without --parent-only (with --update $PARENT_IDENTIFIER) to create sub-issues." + exit 0 +fi + +# F3: Set parent reference for sub-issue bodies (live mode uses Linear URL) +PARENT_REF_LINE="Parent spec: [$PARENT_IDENTIFIER]($parent_url)" + +# ── Phase 1: Create sub-issues with interleaved relations (no projectId) ───── +# Sub-issues are created in Todo state WITHOUT projectId, sorted by priority. +# After each sub-issue (except the first), a sequential blockedBy relation is +# immediately added to the previous sub-issue before creating the next one. +# projectId is deferred to Phase 3 (after all relations) to prevent symphony-ts +# from dispatching a sub-issue before its blocking relations are established. + +declare -a SUB_ISSUE_IDS SUB_ISSUE_IDENTIFIERS +echo "" +echo "Creating $TOTAL sub-issues WITHOUT project (relations interleaved, project deferred)..." +# Sequential chain: skip first sub-issue (k=0, no blocker); k>=1 adds blockedBy to previous + +relation_count=0 +# Track created relations to avoid duplicates (bash 3.2 compatible — no associative arrays) +CREATED_RELATIONS="" + +# Previous sub-issue tracking for sequential chain +prev_sub_id="" +prev_sub_ident="" + +for ((k=0; k<TOTAL; k++)); do + i="${SORTED_INDICES[$k]}" + title="${TASK_TITLES[$i]}" + sub_body=$(build_sub_issue_body "$i") + + # Extract priority if present + pri_num=$(echo "${TASK_BODIES[$i]}" | grep -oE '\*\*Priority\*\*:[[:space:]]*[0-9]+' | grep -oE '[0-9]+' | head -1 || true) + linear_priority=${pri_num:-3} + + # Write sub-issue body to temp file for description + echo "$sub_body" > "$SPEC_TMPFILE" + + # Build sub-issue issueCreate mutation via temp file (title/description are user-provided strings) + # projectId is deliberately OMITTED here — assigned after all relations are in place. + # This prevents a race condition where symphony-ts polls the Pipeline project and dispatches + # a sub-issue before its blocking relations are established. 
+ # Priority is inlined as integer literal to avoid Int/String type coercion issues with -v flag. + GQL_TMPFILE=$(mktemp) + if [[ -n "$TODO_STATE_ID" ]]; then + cat > "$GQL_TMPFILE" <<GQLEOF +mutation(\$title: String!, \$description: String!, \$teamId: String!, \$parentId: String!, \$stateId: String!) { + issueCreate(input: { + title: \$title + description: \$description + teamId: \$teamId + parentId: \$parentId + stateId: \$stateId + priority: ${linear_priority} + }) { + success + issue { id identifier url } + } +} +GQLEOF + result=$(run_with_timeout "creating sub-issue (with state)" $LINEAR_CLI api \ + --variable "title=$title" \ + --variable "description=@$SPEC_TMPFILE" \ + --variable "teamId=$TEAM_ID" \ + --variable "parentId=$PARENT_ID" \ + --variable "stateId=$TODO_STATE_ID" \ + < "$GQL_TMPFILE") + else + cat > "$GQL_TMPFILE" <<GQLEOF +mutation(\$title: String!, \$description: String!, \$teamId: String!, \$parentId: String!) { + issueCreate(input: { + title: \$title + description: \$description + teamId: \$teamId + parentId: \$parentId + priority: ${linear_priority} + }) { + success + issue { id identifier url } + } +} +GQLEOF + result=$(run_with_timeout "creating sub-issue" $LINEAR_CLI api \ + --variable "title=$title" \ + --variable "description=@$SPEC_TMPFILE" \ + --variable "teamId=$TEAM_ID" \ + --variable "parentId=$PARENT_ID" \ + < "$GQL_TMPFILE") + fi + rm -f "$GQL_TMPFILE"; GQL_TMPFILE="" + + success=$(echo "$result" | jq -r '.data.issueCreate.success // false') + sub_identifier=$(echo "$result" | jq -r '.data.issueCreate.issue.identifier // empty') + sub_url=$(echo "$result" | jq -r '.data.issueCreate.issue.url // empty') + sub_id=$(echo "$result" | jq -r '.data.issueCreate.issue.id // empty') + + if [[ "$success" == "true" && -n "$sub_identifier" && -n "$sub_id" ]]; then + # Sequential blocking: skip first (k=0, no blocker); for k>=1 add blockedBy to previous + SUB_ISSUE_IDS[$i]="$sub_id" + SUB_ISSUE_IDENTIFIERS[$i]="$sub_identifier" + echo " 
Created sub-issue: $sub_identifier — $title ($sub_url)" + if [[ $k -ge 1 && -n "$prev_sub_id" ]]; then + if create_blocks_relation "$prev_sub_id" "$sub_id" "$prev_sub_ident" "$sub_identifier" "sequential"; then + verify_blocking_relation "$prev_sub_id" "$sub_id" "$prev_sub_ident" "$sub_identifier" + CREATED_RELATIONS="${CREATED_RELATIONS}|${prev_sub_ident}:${sub_identifier}" + ((relation_count++)) + fi + fi + # NOTE: verify_issue_creation is deferred to after project assignment (Phase 3). + # Sub-issues are created WITHOUT projectId to prevent symphony-ts dispatch race. + + prev_sub_id="$sub_id" + prev_sub_ident="$sub_identifier" + else + echo " FAILED: $title" >&2 + echo " Response: $result" >&2 + SUB_ISSUE_IDS[$i]="" + SUB_ISSUE_IDENTIFIERS[$i]="" + fi +done + +# ── Phase 2: File-overlap relations (second pass) ──────────────────────────── +# Supplementary relations based on file overlap — don't affect dispatch order. + +echo "" +echo "Creating file-overlap blockedBy relations..." + +for ((i=0; i<TOTAL; i++)); do + for ((j=i+1; j<TOTAL; j++)); do + if detect_overlap "${TASK_SCOPES[$i]:-}" "${TASK_SCOPES[$j]:-}"; then + blocker_id="${SUB_ISSUE_IDS[$i]:-}" + blocked_id="${SUB_ISSUE_IDS[$j]:-}" + blocker="${SUB_ISSUE_IDENTIFIERS[$i]:-}" + blocked="${SUB_ISSUE_IDENTIFIERS[$j]:-}" + + if [[ -n "$blocker_id" && -n "$blocked_id" ]]; then + relation_key="${blocker}:${blocked}" + if [[ "$CREATED_RELATIONS" != *"|${relation_key}"* ]]; then + if create_blocks_relation "$blocker_id" "$blocked_id" "$blocker" "$blocked" "file overlap"; then + verify_blocking_relation "$blocker_id" "$blocked_id" "$blocker" "$blocked" + CREATED_RELATIONS="${CREATED_RELATIONS}|${relation_key}" + ((relation_count++)) + fi + fi + fi + fi + done +done + +[[ $relation_count -eq 0 ]] && echo " (none)" + +# ── Assign project to all sub-issues (deferred to avoid race condition) ────── +# Sub-issues were created WITHOUT projectId so symphony-ts can't dispatch them +# before blocking relations are in 
place. Now that all relations are created and +# verified, we batch-assign the Pipeline project to make them visible to the +# orchestrator. + +echo "" +echo "Assigning sub-issues to project (deferred — relations are now in place)..." + +assign_failures=0 +for ((k=0; k<TOTAL; k++)); do + i="${SORTED_INDICES[$k]}" + sub_id="${SUB_ISSUE_IDS[$i]:-}" + sub_ident="${SUB_ISSUE_IDENTIFIERS[$i]:-}" + if [[ -n "$sub_id" ]]; then + GQL_TMPFILE=$(mktemp) + cat > "$GQL_TMPFILE" <<'GQLEOF' +mutation($issueId: String!, $projectId: String!) { + issueUpdate(id: $issueId, input: { projectId: $projectId }) { + success + } +} +GQLEOF + result=$(run_with_timeout "assigning project to $sub_ident" $LINEAR_CLI api \ + --variable "issueId=$sub_id" \ + --variable "projectId=$PROJECT_ID" \ + < "$GQL_TMPFILE") + rm -f "$GQL_TMPFILE"; GQL_TMPFILE="" + + success=$(echo "$result" | jq -r '.data.issueUpdate.success // false') + if [[ "$success" == "true" ]]; then + echo " $sub_ident → Pipeline project" + else + echo " WARNING: Failed to assign $sub_ident to project" >&2 + echo " Response: $result" >&2 + ((assign_failures++)) + fi + # Post-assignment verification: confirm project slug and parent + verify_issue_creation "$sub_id" "$PROJECT_SLUG" "$PARENT_ID" + fi +done + +if [[ $assign_failures -gt 0 ]]; then + echo "ERROR: $assign_failures sub-issue(s) failed project assignment. Issues remain invisible to symphony-ts." >&2 + echo " Manual fix: assign them to project $PROJECT_SLUG in Linear UI or re-run the script." 
>&2 + exit 1 +fi + +# ── Phase 4: Transition parent to Backlog (sub-issues now frozen) ──────────── +# Only reached when PARENT_ONLY=false (--parent-only exits earlier) + +echo "" +# Transition parent to Backlog via issueUpdate GraphQL mutation using stateId +GQL_TMPFILE=$(mktemp) +cat > "$GQL_TMPFILE" <<GQLEOF +mutation { issueUpdate(id: "${PARENT_ID}", input: { stateId: "${BACKLOG_STATE_ID}" }) { success issue { id } } } +GQLEOF +run_with_timeout "final parent update" $LINEAR_CLI api < "$GQL_TMPFILE" > /dev/null 2>&1 || true +rm -f "$GQL_TMPFILE"; GQL_TMPFILE="" +echo "Parent $PARENT_IDENTIFIER transitioned to Backlog" + +# ── Summary ────────────────────────────────────────────────────────────────── + +echo "" +echo "=== Done ===" +echo "Parent: $PARENT_IDENTIFIER ($parent_url)" +echo "Sub-issues: $TOTAL created" +echo "Relations: $relation_count blockedBy relations" +echo "" +echo "Symphony-ts will pick up these issues automatically when the pipeline runs." diff --git a/skills/spec-gen/scripts/verify-line-linter.sh b/skills/spec-gen/scripts/verify-line-linter.sh new file mode 100755 index 00000000..dc3ef9e3 --- /dev/null +++ b/skills/spec-gen/scripts/verify-line-linter.sh @@ -0,0 +1,44 @@ +#!/usr/bin/env bash +# verify-line-linter.sh — Enforces # Verify: presence and $BASE_URL convention +# Workaround for OpenSpec validate not supporting custom content rules +# ~30 lines as specced + +set -euo pipefail + +SPECS_DIR="${1:-openspec/specs}" +ERRORS=0 + +for spec_file in $(find "$SPECS_DIR" -name "*.md" -type f); do + # Find THEN/AND lines that should have verify lines + line_num=0 + prev_was_then_or_and=false + + while IFS= read -r line; do + ((line_num++)) + + if echo "$line" | grep -qE '^\s*(Then|And) '; then + prev_was_then_or_and=true + then_line=$line_num + continue + fi + + if $prev_was_then_or_and; then + if ! 
echo "$line" | grep -q '# Verify:'; then + echo "ERROR: $spec_file:$then_line — THEN/AND clause missing # Verify: line" + ((ERRORS++)) + elif echo "$line" | grep -q 'localhost'; then + echo "ERROR: $spec_file:$line_num — Verify line contains hardcoded localhost (use \$BASE_URL)" + ((ERRORS++)) + fi + prev_was_then_or_and=false + fi + done < "$spec_file" +done + +if [ $ERRORS -gt 0 ]; then + echo "" + echo "FAIL: $ERRORS verify-line violations found" + exit 1 +fi + +echo "PASS: All verify lines present and using \$BASE_URL" diff --git a/src/agent/prompt-builder.ts b/src/agent/prompt-builder.ts index 4a09ed5c..479ff6a9 100644 --- a/src/agent/prompt-builder.ts +++ b/src/agent/prompt-builder.ts @@ -35,6 +35,8 @@ export interface RenderPromptInput { workflow: Pick<WorkflowDefinition, "promptTemplate">; issue: Issue; attempt: number | null; + stageName?: string | null; + reworkCount?: number; } export interface BuildTurnPromptInput extends RenderPromptInput { @@ -57,6 +59,8 @@ export async function renderPrompt(input: RenderPromptInput): Promise<string> { return await liquidEngine.render(parsedTemplate, { issue: toTemplateIssue(input.issue), attempt: input.attempt, + stageName: input.stageName ?? null, + reworkCount: input.reworkCount ?? 0, }); } catch (error) { throw toPromptTemplateError(error); @@ -75,6 +79,7 @@ export async function buildTurnPrompt( attempt: input.attempt, turnNumber: input.turnNumber, maxTurns: input.maxTurns, + stageName: input.stageName ?? null, }); } @@ -83,13 +88,14 @@ export function buildContinuationPrompt(input: { attempt: number | null; turnNumber: number; maxTurns: number; + stageName?: string | null; }): string { const attemptLine = input.attempt === null ? "This worker session started from the initial dispatch." 
: `This worker session is running retry/continuation attempt ${input.attempt}.`; - return [ + const lines = [ `Continue working on issue ${input.issue.identifier}: ${input.issue.title}.`, `This is continuation turn ${input.turnNumber} of ${input.maxTurns} in the current worker session.`, attemptLine, @@ -97,7 +103,36 @@ export function buildContinuationPrompt(input: { "Reuse the existing thread context and current workspace state.", "Do not restate the original task prompt unless it is strictly needed.", "Make the next best progress on the issue, then stop when this session has no further useful work to do.", - ].join("\n"); + ]; + + if (input.stageName) { + lines.push(`Current stage: ${input.stageName}.`); + + switch (input.stageName) { + case "investigate": + lines.push( + "CONSTRAINT: You are in the INVESTIGATE stage. Do NOT implement code, create branches, or open PRs. Investigation and planning only. When you have posted your investigation findings, output the exact text [STAGE_COMPLETE] as the last line of your final message.", + ); + break; + case "implement": + lines.push( + "You are in the IMPLEMENT stage. Focus on implementing the code changes, running tests, and opening a PR. When you have opened a PR and all verify commands pass, output the exact text [STAGE_COMPLETE] as the last line of your final message.", + ); + break; + case "merge": + lines.push( + "You are in the MERGE stage. Merge the PR and verify the merge succeeded. 
When you have successfully merged the PR, output the exact text [STAGE_COMPLETE] as the last line of your final message.", + ); + break; + default: + lines.push( + `When you have completed the ${input.stageName} stage, output the exact text [STAGE_COMPLETE] as the last line of your final message.`, + ); + break; + } + } + + return lines.join("\n"); } function toTemplateIssue(issue: Issue): Record<string, unknown> { diff --git a/src/agent/runner.ts b/src/agent/runner.ts index d518fab7..7eef6d08 100644 --- a/src/agent/runner.ts +++ b/src/agent/runner.ts @@ -7,7 +7,11 @@ import { type CodexTurnResult, } from "../codex/app-server-client.js"; import { createLinearGraphqlDynamicTool } from "../codex/linear-graphql-tool.js"; -import type { ResolvedWorkflowConfig } from "../config/types.js"; +import { createWorkpadSyncDynamicTool } from "../codex/workpad-sync-tool.js"; +import type { + ResolvedWorkflowConfig, + StageDefinition, +} from "../config/types.js"; import { type Issue, type LiveSession, @@ -16,8 +20,12 @@ import { type Workspace, createEmptyLiveSession, normalizeIssueState, + parseFailureSignal, } from "../domain/model.js"; +import { formatEasternTimestamp } from "../logging/format-timestamp.js"; import { applyCodexEventToSession } from "../logging/session-metrics.js"; +import { createRunnerFromConfig, isAiSdkRunner } from "../runners/factory.js"; +import type { RunnerKind } from "../runners/types.js"; import type { IssueTracker } from "../tracker/tracker.js"; import { WorkspaceHookRunner } from "../workspace/hooks.js"; import { validateWorkspaceCwd } from "../workspace/path-safety.js"; @@ -33,6 +41,8 @@ export interface AgentRunnerEvent extends CodexClientEvent { attempt: number | null; workspacePath: string; turnCount: number; + promptChars?: number; + estimatedPromptTokens?: number; } export interface AgentRunnerCodexClient { @@ -73,6 +83,9 @@ export interface AgentRunInput { issue: Issue; attempt: number | null; signal?: AbortSignal; + stage?: StageDefinition 
| null; + stageName?: string | null; + reworkCount?: number; } export interface AgentRunResult { @@ -149,7 +162,11 @@ export class AgentRunner { hooks: this.hooks, }); this.createCodexClient = - options.createCodexClient ?? createDefaultCodexClient; + options.createCodexClient ?? + createDefaultClientFactory( + options.config.runner.kind, + options.config.runner.model, + ); this.fetchFn = options.fetchFn; this.onEvent = options.onEvent; } @@ -166,11 +183,19 @@ export class AgentRunner { issueIdentifier: issue.identifier, attempt: input.attempt, workspacePath: "", - startedAt: new Date().toISOString(), + startedAt: formatEasternTimestamp(new Date()), status: "preparing_workspace", }; const abortController = createAgentAbortController(input.signal); + // Resolve effective config from stage overrides, falling back to global + const stage = input.stage ?? null; + const effectiveRunnerKind = (stage?.runner ?? + this.config.runner.kind) as RunnerKind; + const effectiveModel = stage?.model ?? this.config.runner.model; + const effectiveMaxTurns = stage?.maxTurns ?? this.config.agent.maxTurns; + const effectivePromptTemplate = stage?.prompt ?? this.config.promptTemplate; + try { abortController.throwIfAborted({ issue, @@ -179,6 +204,21 @@ export class AgentRunner { liveSession, }); + // On fresh dispatch with stages at the initial stage, remove stale workspace + // for a clean start. For flat dispatch (no stages) or continuation attempts, + // preserve the workspace so interrupted work survives restarts. + if ( + input.attempt === null && + input.stageName !== null && + input.stageName === (this.config.stages?.initialStage ?? 
null) + ) { + try { + await this.workspaceManager.removeForIssue(issue.id); + } catch { + // Best-effort: workspace may not exist + } + } + workspace = await this.workspaceManager.createForIssue(issue.id); runAttempt.workspacePath = validateWorkspaceCwd({ cwd: workspace.path, @@ -194,7 +234,17 @@ export class AgentRunner { }); runAttempt.status = "launching_agent_process"; - client = this.createCodexClient({ + let currentPromptChars = 0; + let currentEstimatedPromptTokens = 0; + const effectiveClientFactory = isAiSdkRunner(effectiveRunnerKind) + ? (factoryInput: AgentRunnerCodexClientFactoryInput) => + createRunnerFromConfig({ + config: { kind: effectiveRunnerKind, model: effectiveModel }, + cwd: factoryInput.cwd, + onEvent: factoryInput.onEvent, + }) + : this.createCodexClient; + client = effectiveClientFactory({ command: this.config.codex.command, cwd: workspace.path, approvalPolicy: this.config.codex.approvalPolicy, @@ -213,6 +263,8 @@ export class AgentRunner { attempt: input.attempt, workspacePath, turnCount: liveSession.turnCount, + promptChars: currentPromptChars, + estimatedPromptTokens: currentEstimatedPromptTokens, }); }, }); @@ -220,7 +272,7 @@ export class AgentRunner { for ( let turnNumber = 1; - turnNumber <= this.config.agent.maxTurns; + turnNumber <= effectiveMaxTurns; turnNumber += 1 ) { abortController.throwIfAborted({ @@ -232,13 +284,17 @@ export class AgentRunner { runAttempt.status = "building_prompt"; const prompt = await buildTurnPrompt({ workflow: { - promptTemplate: this.config.promptTemplate, + promptTemplate: effectivePromptTemplate, }, issue, attempt: input.attempt, + stageName: input.stageName ?? null, + reworkCount: input.reworkCount ?? 
0, turnNumber, - maxTurns: this.config.agent.maxTurns, + maxTurns: effectiveMaxTurns, }); + currentPromptChars = prompt.length; + currentEstimatedPromptTokens = Math.ceil(prompt.length / 4); const title = `${issue.identifier}: ${issue.title}`; runAttempt.status = @@ -256,7 +312,7 @@ export class AgentRunner { : lastTurn.status === "failed" ? "turn_failed" : "turn_cancelled", - timestamp: new Date().toISOString(), + timestamp: formatEasternTimestamp(new Date()), codexAppServerPid: liveSession.codexAppServerPid, sessionId: lastTurn.sessionId, threadId: lastTurn.threadId, @@ -268,6 +324,33 @@ export class AgentRunner { ...(lastTurn.message === null ? {} : { message: lastTurn.message }), }); + // Early exit: agent signaled stage completion or failure + if (lastTurn.message?.trimEnd().endsWith("[STAGE_COMPLETE]")) { + break; + } + if ( + lastTurn.message !== null && + parseFailureSignal(lastTurn.message) !== null + ) { + break; + } + + // Turn failed at infrastructure level (e.g. abort/timeout) without an + // explicit agent failure signal — propagate so the orchestrator sees + // worker_exit_abnormal instead of the misleading worker_exit_normal. + if (lastTurn.status !== "completed") { + throw new AgentRunnerError({ + message: lastTurn.message ?? "Agent turn failed unexpectedly.", + status: "failed", + failedPhase: runAttempt.status, + issue, + // biome-ignore lint/style/noNonNullAssertion: workspace is assigned before this point in the run loop + workspace: workspace!, + runAttempt: { ...runAttempt }, + liveSession: { ...liveSession }, + }); + } + runAttempt.status = "finishing"; issue = await this.refreshIssueState(issue); if (!this.isIssueStillActive(issue)) { @@ -319,13 +402,25 @@ export class AgentRunner { return []; } - return [ + const tools: CodexDynamicTool[] = [ createLinearGraphqlDynamicTool({ endpoint: this.config.tracker.endpoint, apiKey: this.config.tracker.apiKey, ...(this.fetchFn === undefined ? 
{} : { fetchFn: this.fetchFn }), }), ]; + + if (this.config.tracker.apiKey !== null) { + tools.push( + createWorkpadSyncDynamicTool({ + apiKey: this.config.tracker.apiKey, + endpoint: this.config.tracker.endpoint, + ...(this.fetchFn === undefined ? {} : { fetchFn: this.fetchFn }), + }), + ); + } + + return tools; } private async refreshIssueState(issue: Issue): Promise<Issue> { @@ -409,6 +504,24 @@ async function cleanupWorkspaceArtifacts(workspacePath: string): Promise<void> { }); } +function createDefaultClientFactory( + runnerKind: string, + runnerModel: string | null = null, +): (input: AgentRunnerCodexClientFactoryInput) => AgentRunnerCodexClient { + const kind = runnerKind as RunnerKind; + + if (isAiSdkRunner(kind)) { + return (input) => + createRunnerFromConfig({ + config: { kind, model: runnerModel }, + cwd: input.cwd, + onEvent: input.onEvent, + }); + } + + return createDefaultCodexClient; +} + function createDefaultCodexClient( input: AgentRunnerCodexClientFactoryInput, ): AgentRunnerCodexClient { diff --git a/src/chunking.ts b/src/chunking.ts new file mode 100644 index 00000000..fabdab14 --- /dev/null +++ b/src/chunking.ts @@ -0,0 +1,75 @@ +/** + * Message chunking utilities for Slack message posting. + * + * Slack imposes a ~40,000 character limit per message. This module splits + * long responses at paragraph boundaries, falling back to hard splits when + * a single paragraph exceeds the limit. + */ + +/** Maximum characters per Slack message chunk. */ +export const SLACK_MAX_CHARS = 39_000; + +/** + * Split a response into chunks that each fit within Slack's message limit. + * + * Strategy: + * 1. Split text at paragraph boundaries (`\n\n`). + * 2. Accumulate paragraphs into chunks up to `maxChars`. + * 3. If a single paragraph exceeds `maxChars`, hard-split it. + * + * @param text - The full response text to chunk. + * @param maxChars - Maximum characters per chunk (default: 39,000). + * @returns Array of string chunks, each under `maxChars`. 
+ */ +export function chunkResponse( + text: string, + maxChars: number = SLACK_MAX_CHARS, +): string[] { + if (text.length <= maxChars) { + return [text]; + } + + const paragraphs = text.split(/\n\n+/); + const chunks: string[] = []; + let current = ""; + + for (const paragraph of paragraphs) { + const trimmed = paragraph.trim(); + if (trimmed.length === 0) { + continue; + } + + // If a single paragraph exceeds maxChars, hard-split it + if (trimmed.length > maxChars) { + // Flush current buffer first + if (current.length > 0) { + chunks.push(current); + current = ""; + } + // Hard-split the oversized paragraph + for (let i = 0; i < trimmed.length; i += maxChars) { + chunks.push(trimmed.slice(i, i + maxChars)); + } + continue; + } + + // Would adding this paragraph exceed the limit? + const separator = current.length > 0 ? "\n\n" : ""; + if (current.length + separator.length + trimmed.length > maxChars) { + // Flush current chunk and start a new one + if (current.length > 0) { + chunks.push(current); + } + current = trimmed; + } else { + current = current + separator + trimmed; + } + } + + // Flush remaining content + if (current.length > 0) { + chunks.push(current); + } + + return chunks.length > 0 ? 
chunks : [text]; +} diff --git a/src/cli/main.ts b/src/cli/main.ts index 5bb5147d..dff751fb 100644 --- a/src/cli/main.ts +++ b/src/cli/main.ts @@ -1,6 +1,6 @@ #!/usr/bin/env node -import { realpathSync } from "node:fs"; +import { realpathSync, writeSync } from "node:fs"; import { resolve } from "node:path"; import { fileURLToPath, pathToFileURL } from "node:url"; @@ -8,10 +8,16 @@ import { resolveWorkflowConfig } from "../config/config-resolver.js"; import { WORKFLOW_FILENAME } from "../config/defaults.js"; import { loadWorkflowDefinition } from "../config/workflow-loader.js"; import { ERROR_CODES } from "../errors/codes.js"; +import { formatEasternTimestamp } from "../logging/format-timestamp.js"; +import { + PipelineNotifier, + createSlackPoster, +} from "../orchestrator/pipeline-notifier.js"; import { type RuntimeServiceHandle, startRuntimeService, } from "../orchestrator/runtime-host.js"; +import { getDisplayVersion } from "../version.js"; export const CLI_ACKNOWLEDGEMENT_FLAG = "--acknowledge-high-trust-preview"; @@ -21,6 +27,7 @@ export interface CliOptions { port: number | null; acknowledged: boolean; help: boolean; + version: boolean; } export interface CliRuntimeSettings { @@ -36,6 +43,7 @@ export interface CliHost { export interface StartCliHostInput { options: CliOptions; runtime: CliRuntimeSettings; + env: NodeJS.ProcessEnv; } export interface CliIo { @@ -67,6 +75,7 @@ export function parseCliArgs(argv: readonly string[]): CliOptions { let port: number | null = null; let acknowledged = false; let help = false; + let version = false; for (let index = 0; index < argv.length; index += 1) { const token = argv[index]; @@ -90,6 +99,11 @@ export function parseCliArgs(argv: readonly string[]): CliOptions { continue; } + if (token === "--version" || token === "-V") { + version = true; + continue; + } + if (token === CLI_ACKNOWLEDGEMENT_FLAG) { acknowledged = true; continue; @@ -125,6 +139,7 @@ export function parseCliArgs(argv: readonly string[]): CliOptions { 
port, acknowledged, help, + version, }; } @@ -148,9 +163,20 @@ export function applyCliOverrides( export async function startCliHost( input: StartCliHostInput, ): Promise<RuntimeServiceHandle> { + const slackChannel = input.runtime.config.server.slackNotifyChannel; + const slackToken = input.env.SLACK_BOT_TOKEN; + const notifier = + slackChannel !== null && slackToken !== undefined + ? new PipelineNotifier({ + channel: slackChannel, + poster: createSlackPoster({ botToken: slackToken }), + }) + : null; + return startRuntimeService({ config: input.runtime.config, logsRoot: input.runtime.logsRoot, + notifier, }); } @@ -178,6 +204,11 @@ export async function runCli( return 1; } + if (options.version) { + io.stdout(`symphony-ts ${getDisplayVersion()}\n`); + return 0; + } + if (options.help) { io.stdout(renderUsage()); return 0; @@ -201,6 +232,7 @@ export async function runCli( const host = await startHost({ options, runtime, + env, }); const exitCode = await host.waitForExit(); @@ -216,6 +248,51 @@ export async function runCli( } } +function safeErrorMessage(error: unknown): string { + if (error instanceof Error) return error.message; + try { + return String(error); + } catch { + return "[non-stringifiable value]"; + } +} + +export function handleUncaughtException(error: unknown): void { + const entry = { + timestamp: formatEasternTimestamp(new Date()), + level: "error", + event: "process_crash", + message: safeErrorMessage(error), + error_code: "uncaught_exception", + stack: error instanceof Error ? error.stack : undefined, + }; + process.exitCode = 70; + try { + writeSync(2, `${JSON.stringify(entry)}\n`); + } catch { + // Ignore write errors during crash — exiting is the priority. 
+ } + process.exit(70); +} + +export function handleUnhandledRejection(reason: unknown): void { + const entry = { + timestamp: formatEasternTimestamp(new Date()), + level: "error", + event: "process_crash", + message: safeErrorMessage(reason), + error_code: "unhandled_rejection", + stack: reason instanceof Error ? reason.stack : undefined, + }; + process.exitCode = 70; + try { + writeSync(2, `${JSON.stringify(entry)}\n`); + } catch { + // Ignore write errors during crash — exiting is the priority. + } + process.exit(70); +} + export async function main(): Promise<void> { const exitCode = await runCli(process.argv.slice(2)); process.exitCode = exitCode; @@ -287,5 +364,7 @@ function renderUsage(): string { } if (shouldRunAsCli(import.meta.url, process.argv[1])) { - void main(); + process.on("uncaughtException", handleUncaughtException); + process.on("unhandledRejection", handleUnhandledRejection); + void main().catch(handleUnhandledRejection); } diff --git a/src/codex/app-server-client.ts b/src/codex/app-server-client.ts index aa40e130..7e3a4c7b 100644 --- a/src/codex/app-server-client.ts +++ b/src/codex/app-server-client.ts @@ -1,10 +1,12 @@ import { type ChildProcessWithoutNullStreams, spawn } from "node:child_process"; import { ERROR_CODES } from "../errors/codes.js"; +import { formatEasternTimestamp } from "../logging/format-timestamp.js"; +import { VERSION } from "../version.js"; const DEFAULT_CLIENT_INFO = Object.freeze({ name: "symphony-ts", - version: "0.1.0", + version: VERSION, }); const DEFAULT_MAX_LINE_BYTES = 10 * 1024 * 1024; @@ -16,6 +18,10 @@ export interface CodexUsage { inputTokens: number; outputTokens: number; totalTokens: number; + cacheReadTokens?: number; + cacheWriteTokens?: number; + noCacheTokens?: number; + reasoningTokens?: number; } export type CodexTurnStatus = "completed" | "failed" | "cancelled"; @@ -33,7 +39,8 @@ export interface CodexClientEvent { | "unsupported_tool_call" | "notification" | "other_message" - | "malformed"; + | 
"malformed" + | "activity_heartbeat"; timestamp: string; codexAppServerPid: string | null; sessionId?: string | null; @@ -783,7 +790,7 @@ export class CodexAppServerClient { ): void { this.options.onEvent?.({ ...input, - timestamp: new Date().toISOString(), + timestamp: formatEasternTimestamp(new Date()), codexAppServerPid: this.child?.pid === undefined ? null : String(this.child.pid), }); @@ -977,25 +984,44 @@ function extractUsage(message: JsonObject): CodexUsage | null { } function coerceUsage(value: JsonObject): CodexUsage | null { - const aliases = [ + const specificAliases = [ ["inputTokens", "outputTokens", "totalTokens"], ["input_tokens", "output_tokens", "total_tokens"], - ["input", "output", "total"], ] as const; - for (const [inputKey, outputKey, totalKey] of aliases) { + // Check specific aliases first (input + output sufficient) + for (const [inputKey, outputKey, totalKey] of specificAliases) { const input = asFiniteNumber(value[inputKey]); const output = asFiniteNumber(value[outputKey]); const total = asFiniteNumber(value[totalKey]); - if (input !== null && output !== null && total !== null) { + // Accept usage if at least input and output are present; total is optional. + if (input !== null && output !== null) { return { inputTokens: input, outputTokens: output, - totalTokens: total, + totalTokens: total ?? 
input + output, + ...extractExtendedTokenFields(value), }; } } + // Check generic alias (require all 3 fields to avoid false matches) + const genericInput = asFiniteNumber(value.input); + const genericOutput = asFiniteNumber(value.output); + const genericTotal = asFiniteNumber(value.total); + if ( + genericInput !== null && + genericOutput !== null && + genericTotal !== null + ) { + return { + inputTokens: genericInput, + outputTokens: genericOutput, + totalTokens: genericTotal, + ...extractExtendedTokenFields(value), + }; + } + if ("total_token_usage" in value) { const nested = value.total_token_usage; if ( @@ -1010,6 +1036,61 @@ function coerceUsage(value: JsonObject): CodexUsage | null { return null; } +/** + * Extract optional extended token fields (cache, reasoning) from a usage object. + * Handles both camelCase and snake_case variants. + */ +function extractExtendedTokenFields( + value: JsonObject, +): Partial< + Pick< + CodexUsage, + "cacheReadTokens" | "cacheWriteTokens" | "noCacheTokens" | "reasoningTokens" + > +> { + const result: Partial< + Pick< + CodexUsage, + | "cacheReadTokens" + | "cacheWriteTokens" + | "noCacheTokens" + | "reasoningTokens" + > + > = {}; + + const cacheRead = + asFiniteNumber(value.cacheReadTokens) ?? + asFiniteNumber(value.cache_read_tokens) ?? + asFiniteNumber(value.cache_read_input_tokens); + if (cacheRead !== null) { + result.cacheReadTokens = cacheRead; + } + + const cacheWrite = + asFiniteNumber(value.cacheWriteTokens) ?? + asFiniteNumber(value.cache_write_tokens) ?? + asFiniteNumber(value.cache_creation_input_tokens); + if (cacheWrite !== null) { + result.cacheWriteTokens = cacheWrite; + } + + const noCache = + asFiniteNumber(value.noCacheTokens) ?? + asFiniteNumber(value.no_cache_tokens); + if (noCache !== null) { + result.noCacheTokens = noCache; + } + + const reasoning = + asFiniteNumber(value.reasoningTokens) ?? 
+ asFiniteNumber(value.reasoning_tokens); + if (reasoning !== null) { + result.reasoningTokens = reasoning; + } + + return result; +} + function extractRateLimits( message: JsonObject, ): Record<string, unknown> | null { diff --git a/src/codex/workpad-sync-tool.ts b/src/codex/workpad-sync-tool.ts new file mode 100644 index 00000000..985bb826 --- /dev/null +++ b/src/codex/workpad-sync-tool.ts @@ -0,0 +1,300 @@ +import { readFile } from "node:fs/promises"; + +import type { CodexDynamicTool } from "./app-server-client.js"; + +const WORKPAD_SYNC_DESCRIPTION = + "Create or update a workpad comment on a Linear issue. Reads body from a local file to keep conversation context small."; + +const LINEAR_GRAPHQL_ENDPOINT = "https://api.linear.app/graphql"; + +type JsonObject = Record<string, unknown>; + +export interface WorkpadSyncToolInput { + issue_id: string; + file_path: string; + comment_id?: string; +} + +export interface WorkpadSyncToolResult { + success: boolean; + comment_id?: string; + error?: { + code: string; + message: string; + details?: unknown; + }; +} + +export interface WorkpadSyncDynamicToolOptions { + apiKey: string; + endpoint?: string; + networkTimeoutMs?: number; + fetchFn?: typeof fetch; +} + +export const WORKPAD_SYNC_TOOL_NAME = "sync_workpad"; + +export function createWorkpadSyncDynamicTool( + options: WorkpadSyncDynamicToolOptions, +): CodexDynamicTool { + const endpoint = options.endpoint ?? LINEAR_GRAPHQL_ENDPOINT; + const networkTimeoutMs = options.networkTimeoutMs ?? 30_000; + const fetchFn = options.fetchFn ?? 
globalThis.fetch; + + return { + name: WORKPAD_SYNC_TOOL_NAME, + description: WORKPAD_SYNC_DESCRIPTION, + inputSchema: { + type: "object", + additionalProperties: false, + required: ["issue_id", "file_path"], + properties: { + issue_id: { + type: "string", + minLength: 1, + description: "The Linear issue ID to attach the workpad comment to.", + }, + file_path: { + type: "string", + minLength: 1, + description: + "Local file path to read workpad content from (e.g. workpad.md).", + }, + comment_id: { + type: "string", + description: + "If provided, update this existing comment. If omitted, create a new comment.", + }, + }, + }, + async execute(input: unknown): Promise<WorkpadSyncToolResult> { + const normalized = normalizeInput(input); + if (!normalized.success) { + return normalized; + } + + let body: string; + try { + body = await readFile(normalized.file_path, "utf-8"); + } catch (error) { + return { + success: false, + error: { + code: "file_read_error", + message: + error instanceof Error + ? 
`Failed to read workpad file: ${error.message}` + : "Failed to read workpad file.", + }, + }; + } + + try { + if (normalized.comment_id !== undefined) { + const response = await executeGraphql( + endpoint, + options.apiKey, + networkTimeoutMs, + fetchFn, + COMMENT_UPDATE_MUTATION, + { commentId: normalized.comment_id, body }, + ); + const update = response.commentUpdate; + if ( + update === null || + typeof update !== "object" || + Array.isArray(update) || + (update as Record<string, unknown>).success !== true + ) { + return { + success: false, + error: { + code: "linear_response_malformed", + message: "Linear commentUpdate did not return success.", + details: response, + }, + }; + } + return { + success: true, + comment_id: normalized.comment_id, + }; + } + + const response = await executeGraphql( + endpoint, + options.apiKey, + networkTimeoutMs, + fetchFn, + COMMENT_CREATE_MUTATION, + { issueId: normalized.issue_id, body }, + ); + + const commentId = extractCommentId(response); + if (commentId === null) { + return { + success: false, + error: { + code: "linear_response_malformed", + message: + "Linear commentCreate succeeded but did not return a comment ID.", + details: response, + }, + }; + } + + return { + success: true, + comment_id: commentId, + }; + } catch (error) { + return { + success: false, + error: { + code: "linear_api_request", + message: + error instanceof Error + ? error.message + : "Linear API request failed.", + }, + }; + } + }, + }; +} + +const COMMENT_CREATE_MUTATION = ` + mutation CommentCreate($issueId: String!, $body: String!) { + commentCreate(input: { issueId: $issueId, body: $body }) { + success + comment { + id + } + } + } +`; + +const COMMENT_UPDATE_MUTATION = ` + mutation CommentUpdate($commentId: String!, $body: String!) 
{ + commentUpdate(id: $commentId, input: { body: $body }) { + success + } + } +`; + +function normalizeInput(input: unknown): + | (WorkpadSyncToolResult & { success: false }) + | { + success: true; + issue_id: string; + file_path: string; + comment_id?: string; + } { + if (input === null || typeof input !== "object" || Array.isArray(input)) { + return invalidInput( + "sync_workpad expects an object with issue_id and file_path.", + ); + } + + const issueId = "issue_id" in input ? input.issue_id : undefined; + if (typeof issueId !== "string" || issueId.trim().length === 0) { + return invalidInput("sync_workpad.issue_id must be a non-empty string."); + } + + const filePath = "file_path" in input ? input.file_path : undefined; + if (typeof filePath !== "string" || filePath.trim().length === 0) { + return invalidInput("sync_workpad.file_path must be a non-empty string."); + } + + const commentId = "comment_id" in input ? input.comment_id : undefined; + if (commentId !== undefined && typeof commentId !== "string") { + return invalidInput( + "sync_workpad.comment_id must be a string if provided.", + ); + } + + return { + success: true, + issue_id: issueId, + file_path: filePath, + ...(commentId === undefined ? {} : { comment_id: commentId }), + }; +} + +function invalidInput( + message: string, + details?: unknown, +): WorkpadSyncToolResult & { success: false } { + return { + success: false, + error: { + code: "invalid_input", + message, + details: details ?? 
null, + }, + }; +} + +async function executeGraphql( + endpoint: string, + apiKey: string, + networkTimeoutMs: number, + fetchFn: typeof fetch, + query: string, + variables: JsonObject, +): Promise<JsonObject> { + const response = await fetchFn(endpoint, { + method: "POST", + headers: { + "Content-Type": "application/json", + Authorization: apiKey, + }, + body: JSON.stringify({ query, variables }), + signal: AbortSignal.timeout(networkTimeoutMs), + }); + + if (!response.ok) { + throw new Error(`Linear API returned HTTP ${response.status}.`); + } + + const body = (await response.json()) as JsonObject; + const errors = body.errors; + if (Array.isArray(errors) && errors.length > 0) { + throw new Error(`Linear GraphQL errors: ${JSON.stringify(errors)}`); + } + + const data = body.data; + if (data === null || typeof data !== "object" || Array.isArray(data)) { + throw new Error("Linear API returned unexpected response format."); + } + + return data as JsonObject; +} + +function extractCommentId(data: JsonObject): string | null { + const commentCreate = data.commentCreate; + if ( + commentCreate === null || + typeof commentCreate !== "object" || + Array.isArray(commentCreate) + ) { + return null; + } + + const ccObj = commentCreate as JsonObject; + if (ccObj.success !== true) { + return null; + } + + const comment = ccObj.comment; + if ( + comment === null || + typeof comment !== "object" || + Array.isArray(comment) + ) { + return null; + } + + const id = (comment as JsonObject).id; + return typeof id === "string" && id.length > 0 ? 
id : null; +} diff --git a/src/config/config-resolver.ts b/src/config/config-resolver.ts index ccd2e8bd..229e16b4 100644 --- a/src/config/config-resolver.ts +++ b/src/config/config-resolver.ts @@ -13,6 +13,7 @@ import { DEFAULT_LINEAR_PAGE_SIZE, DEFAULT_MAX_CONCURRENT_AGENTS, DEFAULT_MAX_CONCURRENT_AGENTS_BY_STATE, + DEFAULT_MAX_RETRY_ATTEMPTS, DEFAULT_MAX_RETRY_BACKOFF_MS, DEFAULT_MAX_TURNS, DEFAULT_OBSERVABILITY_ENABLED, @@ -20,6 +21,7 @@ import { DEFAULT_OBSERVABILITY_RENDER_INTERVAL_MS, DEFAULT_POLL_INTERVAL_MS, DEFAULT_READ_TIMEOUT_MS, + DEFAULT_RUNNER_KIND, DEFAULT_STALL_TIMEOUT_MS, DEFAULT_TERMINAL_STATES, DEFAULT_TRACKER_KIND, @@ -28,8 +30,16 @@ import { } from "./defaults.js"; import type { DispatchValidationResult, + FastTrackConfig, + GateType, ResolvedWorkflowConfig, + ReviewerDefinition, + StageDefinition, + StageTransitions, + StageType, + StagesConfig, } from "./types.js"; +import { GATE_TYPES, STAGE_TYPES } from "./types.js"; const LINEAR_CANONICAL_API_KEY_ENV = "LINEAR_API_KEY"; @@ -43,6 +53,7 @@ export function resolveWorkflowConfig( const workspace = asRecord(config.workspace); const hooks = asRecord(config.hooks); const agent = asRecord(config.agent); + const runner = asRecord(config.runner); const codex = asRecord(config.codex); const server = asRecord(config.server); const observability = asRecord(config.observability); @@ -94,10 +105,17 @@ export function resolveWorkflowConfig( maxRetryBackoffMs: readPositiveInteger(agent.max_retry_backoff_ms) ?? DEFAULT_MAX_RETRY_BACKOFF_MS, + maxRetryAttempts: + readPositiveInteger(agent.max_retry_attempts) ?? + DEFAULT_MAX_RETRY_ATTEMPTS, maxConcurrentAgentsByState: readStateConcurrencyMap( agent.max_concurrent_agents_by_state, ), }, + runner: { + kind: readString(runner.kind) ?? DEFAULT_RUNNER_KIND, + model: readString(runner.model), + }, codex: { command: readString(codex.command) ?? 
DEFAULT_CODEX_COMMAND, approvalPolicy: codex.approval_policy, @@ -112,6 +130,10 @@ export function resolveWorkflowConfig( }, server: { port: readNonNegativeInteger(server.port), + slackNotifyChannel: + readString(server.slack_notify_channel) ?? + environment.SLACK_NOTIFY_CHANNEL ?? + null, }, observability: { dashboardEnabled: @@ -124,6 +146,8 @@ export function resolveWorkflowConfig( readPositiveInteger(observability.render_interval_ms) ?? DEFAULT_OBSERVABILITY_RENDER_INTERVAL_MS, }, + stages: resolveStagesConfig(config.stages), + escalationState: readString(config.escalation_state), }; } @@ -345,6 +369,237 @@ function resolvePathValue( return normalize(expanded); } +export function resolveStagesConfig(value: unknown): StagesConfig | null { + if (!value || typeof value !== "object" || Array.isArray(value)) { + return null; + } + + const raw = value as Record<string, unknown>; + const stageEntries: Record<string, StageDefinition> = {}; + let firstStageName: string | null = null; + + for (const [name, stageValue] of Object.entries(raw)) { + if (name === "initial_stage" || name === "fast_track") { + continue; + } + + const stageRecord = asRecord(stageValue); + const rawType = readString(stageRecord.type); + const stageType = parseStageType(rawType); + if (stageType === null) { + continue; + } + + if (firstStageName === null) { + firstStageName = name; + } + + stageEntries[name] = { + type: stageType, + runner: readString(stageRecord.runner), + model: readString(stageRecord.model), + prompt: readString(stageRecord.prompt), + maxTurns: readPositiveInteger(stageRecord.max_turns), + timeoutMs: readPositiveInteger(stageRecord.timeout_ms), + concurrency: readPositiveInteger(stageRecord.concurrency), + gateType: parseGateType(readString(stageRecord.gate_type)), + maxRework: readPositiveInteger(stageRecord.max_rework), + reviewers: parseReviewers(stageRecord.reviewers), + transitions: { + onComplete: readString(stageRecord.on_complete), + onApprove: 
readString(stageRecord.on_approve), + onRework: readString(stageRecord.on_rework), + }, + linearState: readString(stageRecord.linear_state), + }; + } + + if (Object.keys(stageEntries).length === 0) { + return null; + } + + // biome-ignore lint/style/noNonNullAssertion: firstStageName guaranteed non-null when stageEntries is non-empty + const initialStage = readString(raw.initial_stage) ?? firstStageName!; + + const fastTrackRaw = asRecord(raw.fast_track); + const fastTrackLabel = readString(fastTrackRaw.label); + const fastTrackInitialStage = readString(fastTrackRaw.initial_stage); + const fastTrack: FastTrackConfig | null = + fastTrackLabel !== null && fastTrackInitialStage !== null + ? { label: fastTrackLabel, initialStage: fastTrackInitialStage } + : null; + + return Object.freeze({ + initialStage, + fastTrack, + stages: Object.freeze(stageEntries), + }); +} + +export interface StagesValidationResult { + ok: boolean; + errors: string[]; +} + +export function validateStagesConfig( + stagesConfig: StagesConfig | null, +): StagesValidationResult { + if (stagesConfig === null) { + return { ok: true, errors: [] }; + } + + const errors: string[] = []; + const stageNames = new Set(Object.keys(stagesConfig.stages)); + + if (!stageNames.has(stagesConfig.initialStage)) { + errors.push( + `initial_stage '${stagesConfig.initialStage}' does not reference a defined stage.`, + ); + } + + if ( + stagesConfig.fastTrack != null && + !stageNames.has(stagesConfig.fastTrack.initialStage) + ) { + errors.push( + `fast_track.initial_stage '${stagesConfig.fastTrack.initialStage}' does not reference a defined stage.`, + ); + } + + let hasTerminal = false; + for (const [name, stage] of Object.entries(stagesConfig.stages)) { + if (stage.type === "terminal") { + hasTerminal = true; + continue; + } + + if (stage.type === "agent") { + if (stage.transitions.onComplete === null) { + errors.push(`Stage '${name}' (agent) has no on_complete transition.`); + } else if 
(!stageNames.has(stage.transitions.onComplete)) { + errors.push( + `Stage '${name}' on_complete references unknown stage '${stage.transitions.onComplete}'.`, + ); + } + + if ( + stage.transitions.onRework !== null && + !stageNames.has(stage.transitions.onRework) + ) { + errors.push( + `Stage '${name}' on_rework references unknown stage '${stage.transitions.onRework}'.`, + ); + } + } + + if (stage.type === "gate") { + if (stage.transitions.onApprove === null) { + errors.push(`Stage '${name}' (gate) has no on_approve transition.`); + } else if (!stageNames.has(stage.transitions.onApprove)) { + errors.push( + `Stage '${name}' on_approve references unknown stage '${stage.transitions.onApprove}'.`, + ); + } + + if ( + stage.transitions.onRework !== null && + !stageNames.has(stage.transitions.onRework) + ) { + errors.push( + `Stage '${name}' on_rework references unknown stage '${stage.transitions.onRework}'.`, + ); + } + } + } + + if (!hasTerminal) { + errors.push( + "No terminal stage defined. 
At least one stage must have type 'terminal'.", + ); + } + + // Check reachability from initial stage + const reachable = new Set<string>(); + const queue = [stagesConfig.initialStage]; + while (queue.length > 0) { + // biome-ignore lint/style/noNonNullAssertion: queue.length > 0 guarantees pop() returns a value + const current = queue.pop()!; + if (reachable.has(current)) { + continue; + } + reachable.add(current); + + const stage = stagesConfig.stages[current]; + if (stage === undefined) { + continue; + } + + for (const target of [ + stage.transitions.onComplete, + stage.transitions.onApprove, + stage.transitions.onRework, + ]) { + if (target !== null && !reachable.has(target)) { + queue.push(target); + } + } + } + + for (const name of stageNames) { + if (!reachable.has(name)) { + errors.push( + `Stage '${name}' is unreachable from initial stage '${stagesConfig.initialStage}'.`, + ); + } + } + + return { ok: errors.length === 0, errors }; +} + +function parseReviewers(value: unknown): ReviewerDefinition[] { + if (!Array.isArray(value)) { + return []; + } + + return value.flatMap((entry) => { + const record = asRecord(entry); + const runner = readString(record.runner); + const role = readString(record.role); + if (runner === null || role === null) { + return []; + } + + return [ + { + runner, + model: readString(record.model), + role, + prompt: readString(record.prompt), + }, + ]; + }); +} + +function parseStageType(value: string | null): StageType | null { + if (value === null) { + return null; + } + const normalized = value.trim().toLowerCase(); + return (STAGE_TYPES as readonly string[]).includes(normalized) + ? (normalized as StageType) + : null; +} + +function parseGateType(value: string | null): GateType | null { + if (value === null) { + return null; + } + const normalized = value.trim().toLowerCase(); + return (GATE_TYPES as readonly string[]).includes(normalized) + ? 
(normalized as GateType) + : null; +} + export const LINEAR_DEFAULTS = Object.freeze({ endpoint: DEFAULT_LINEAR_ENDPOINT, pageSize: DEFAULT_LINEAR_PAGE_SIZE, diff --git a/src/config/defaults.ts b/src/config/defaults.ts index 94f87ed8..e32a3faf 100644 --- a/src/config/defaults.ts +++ b/src/config/defaults.ts @@ -19,10 +19,13 @@ export const DEFAULT_HOOK_TIMEOUT_MS = 60_000; export const DEFAULT_MAX_CONCURRENT_AGENTS = 10; export const DEFAULT_MAX_TURNS = 20; export const DEFAULT_MAX_RETRY_BACKOFF_MS = 300_000; +export const DEFAULT_MAX_RETRY_ATTEMPTS = 5; export const DEFAULT_MAX_CONCURRENT_AGENTS_BY_STATE = Object.freeze( {}, ) as Readonly<Record<string, number>>; +export const DEFAULT_RUNNER_KIND = "codex"; + export const DEFAULT_CODEX_COMMAND = "codex app-server"; export const DEFAULT_TURN_TIMEOUT_MS = 3_600_000; export const DEFAULT_READ_TIMEOUT_MS = 5_000; @@ -58,8 +61,12 @@ export const SPEC_DEFAULTS = Object.freeze({ maxConcurrentAgents: DEFAULT_MAX_CONCURRENT_AGENTS, maxTurns: DEFAULT_MAX_TURNS, maxRetryBackoffMs: DEFAULT_MAX_RETRY_BACKOFF_MS, + maxRetryAttempts: DEFAULT_MAX_RETRY_ATTEMPTS, maxConcurrentAgentsByState: DEFAULT_MAX_CONCURRENT_AGENTS_BY_STATE, }, + runner: { + kind: DEFAULT_RUNNER_KIND, + }, codex: { command: DEFAULT_CODEX_COMMAND, turnTimeoutMs: DEFAULT_TURN_TIMEOUT_MS, diff --git a/src/config/types.ts b/src/config/types.ts index 0761e7ad..f11e3acf 100644 --- a/src/config/types.ts +++ b/src/config/types.ts @@ -27,9 +27,15 @@ export interface WorkflowAgentConfig { maxConcurrentAgents: number; maxTurns: number; maxRetryBackoffMs: number; + maxRetryAttempts: number; maxConcurrentAgentsByState: Readonly<Record<string, number>>; } +export interface WorkflowRunnerConfig { + kind: string; + model: string | null; +} + export interface WorkflowCodexConfig { command: string; approvalPolicy: unknown; @@ -42,6 +48,7 @@ export interface WorkflowCodexConfig { export interface WorkflowServerConfig { port: number | null; + slackNotifyChannel: string | null; } 
export interface WorkflowObservabilityConfig { @@ -50,6 +57,51 @@ export interface WorkflowObservabilityConfig { renderIntervalMs: number; } +export const STAGE_TYPES = ["agent", "gate", "terminal"] as const; +export type StageType = (typeof STAGE_TYPES)[number]; + +export const GATE_TYPES = ["ensemble", "human"] as const; +export type GateType = (typeof GATE_TYPES)[number]; + +export interface StageTransitions { + onComplete: string | null; + onApprove: string | null; + onRework: string | null; +} + +export interface ReviewerDefinition { + runner: string; + model: string | null; + role: string; + prompt: string | null; +} + +export interface StageDefinition { + type: StageType; + runner: string | null; + model: string | null; + prompt: string | null; + maxTurns: number | null; + timeoutMs: number | null; + concurrency: number | null; + gateType: GateType | null; + maxRework: number | null; + reviewers: ReviewerDefinition[]; + transitions: StageTransitions; + linearState: string | null; +} + +export interface FastTrackConfig { + label: string; + initialStage: string; +} + +export interface StagesConfig { + initialStage: string; + fastTrack: FastTrackConfig | null; + stages: Readonly<Record<string, StageDefinition>>; +} + export interface ResolvedWorkflowConfig { workflowPath: string; promptTemplate: string; @@ -58,9 +110,12 @@ export interface ResolvedWorkflowConfig { workspace: WorkflowWorkspaceConfig; hooks: WorkflowHooksConfig; agent: WorkflowAgentConfig; + runner: WorkflowRunnerConfig; codex: WorkflowCodexConfig; server: WorkflowServerConfig; observability: WorkflowObservabilityConfig; + stages: StagesConfig | null; + escalationState: string | null; } export interface DispatchValidationFailure { diff --git a/src/config/workflow-watch.ts b/src/config/workflow-watch.ts index 5860a87e..bc289058 100644 --- a/src/config/workflow-watch.ts +++ b/src/config/workflow-watch.ts @@ -1,5 +1,6 @@ import { type FSWatcher, watch } from "node:fs"; +import { 
formatEasternTimestamp } from "../logging/format-timestamp.js"; import { resolveWorkflowConfig, validateDispatchConfig, @@ -46,7 +47,7 @@ export async function loadWorkflowSnapshot( definition, config, dispatchValidation: validateDispatchConfig(config), - loadedAt: new Date().toISOString(), + loadedAt: formatEasternTimestamp(new Date()), }; } diff --git a/src/domain/model.ts b/src/domain/model.ts index c085d361..f346e912 100644 --- a/src/domain/model.ts +++ b/src/domain/model.ts @@ -27,12 +27,15 @@ export type RunAttemptPhase = (typeof RUN_ATTEMPT_PHASES)[number]; export const ORCHESTRATOR_EVENTS = [ "poll_tick", + "poll_tick_completed", "worker_exit_normal", "worker_exit_abnormal", + "stage_completed", "codex_update_event", "retry_timer_fired", "reconciliation_state_refresh", "stall_timeout", + "shutdown_complete", ] as const; export type OrchestratorEvent = (typeof ORCHESTRATOR_EVENTS)[number]; @@ -79,6 +82,25 @@ export interface RunAttempt { error?: string; } +export interface TurnHistoryEntry { + turnNumber: number; + timestamp: string; + message: string | null; + inputTokens: number; + outputTokens: number; + totalTokens: number; + cacheReadTokens: number; + reasoningTokens: number; + event: string | null; +} + +export interface RecentActivityEntry { + timestamp: string; + toolName: string; + context: string | null; + totalTokens?: number; +} + export interface LiveSession { sessionId: string | null; threadId: string | null; @@ -90,10 +112,23 @@ export interface LiveSession { codexInputTokens: number; codexOutputTokens: number; codexTotalTokens: number; + codexCacheReadTokens: number; + codexCacheWriteTokens: number; + codexNoCacheTokens: number; + codexReasoningTokens: number; + codexTotalInputTokens: number; + codexTotalOutputTokens: number; lastReportedInputTokens: number; lastReportedOutputTokens: number; lastReportedTotalTokens: number; turnCount: number; + totalStageInputTokens: number; + totalStageOutputTokens: number; + totalStageTotalTokens: number; + 
totalStageCacheReadTokens: number; + totalStageCacheWriteTokens: number; + turnHistory: TurnHistoryEntry[]; + recentActivity: RecentActivityEntry[]; } export interface RetryEntry { @@ -103,12 +138,17 @@ export interface RetryEntry { dueAtMs: number; timerHandle: ReturnType<typeof setTimeout> | null; error: string | null; + delayType: "continuation" | "failure"; } export interface CodexTotals { inputTokens: number; outputTokens: number; totalTokens: number; + cacheReadTokens: number; + cacheWriteTokens: number; + noCacheTokens: number; + reasoningTokens: number; secondsRunning: number; } @@ -123,6 +163,18 @@ export interface RunningEntry extends LiveSession { monitorHandle: unknown; } +export interface StageRecord { + stageName: string; + durationMs: number; + totalTokens: number; + inputTokens?: number; + outputTokens?: number; + turns: number; + outcome: string; +} + +export type ExecutionHistory = StageRecord[]; + export interface OrchestratorState { pollIntervalMs: number; maxConcurrentAgents: number; @@ -130,8 +182,46 @@ export interface OrchestratorState { claimed: Set<string>; retryAttempts: Record<string, RetryEntry>; completed: Set<string>; + failed: Set<string>; codexTotals: CodexTotals; codexRateLimits: CodexRateLimits; + issueStages: Record<string, string>; + issueReworkCounts: Record<string, number>; + issueFirstDispatchedAt: Record<string, string>; + issueExecutionHistory: Record<string, ExecutionHistory>; +} + +export const FAILURE_CLASSES = [ + "verify", + "review", + "rebase", + "spec", + "infra", +] as const; +export type FailureClass = (typeof FAILURE_CLASSES)[number]; + +export interface FailureSignal { + failureClass: FailureClass; +} + +const STAGE_FAILED_REGEX = + /\[STAGE_FAILED:\s*(verify|review|rebase|spec|infra)\s*\]/; + +/** + * Parse a `[STAGE_FAILED: class]` signal from agent output text. + * Returns the parsed failure signal or null if no signal is found. 
+ */ +export function parseFailureSignal( + text: string | null | undefined, +): FailureSignal | null { + if (text === null || text === undefined) { + return null; + } + const match = STAGE_FAILED_REGEX.exec(text); + if (match === null) { + return null; + } + return { failureClass: match[1] as FailureClass }; } export function normalizeIssueState(state: string): string { @@ -158,10 +248,23 @@ export function createEmptyLiveSession(): LiveSession { codexInputTokens: 0, codexOutputTokens: 0, codexTotalTokens: 0, + codexCacheReadTokens: 0, + codexCacheWriteTokens: 0, + codexNoCacheTokens: 0, + codexReasoningTokens: 0, + codexTotalInputTokens: 0, + codexTotalOutputTokens: 0, lastReportedInputTokens: 0, lastReportedOutputTokens: 0, lastReportedTotalTokens: 0, turnCount: 0, + totalStageInputTokens: 0, + totalStageOutputTokens: 0, + totalStageTotalTokens: 0, + totalStageCacheReadTokens: 0, + totalStageCacheWriteTokens: 0, + turnHistory: [], + recentActivity: [], }; } @@ -176,12 +279,21 @@ export function createInitialOrchestratorState(input: { claimed: new Set<string>(), retryAttempts: {}, completed: new Set<string>(), + failed: new Set<string>(), codexTotals: { inputTokens: 0, outputTokens: 0, totalTokens: 0, + cacheReadTokens: 0, + cacheWriteTokens: 0, + noCacheTokens: 0, + reasoningTokens: 0, secondsRunning: 0, }, codexRateLimits: null, + issueStages: {}, + issueReworkCounts: {}, + issueFirstDispatchedAt: {}, + issueExecutionHistory: {}, }; } diff --git a/src/index.ts b/src/index.ts index 7d210cb8..70d7365a 100644 --- a/src/index.ts +++ b/src/index.ts @@ -6,6 +6,7 @@ export * from "./config/config-resolver.js"; export * from "./config/types.js"; export * from "./codex/app-server-client.js"; export * from "./codex/linear-graphql-tool.js"; +export * from "./codex/workpad-sync-tool.js"; export * from "./config/workflow-loader.js"; export * from "./config/workflow-watch.js"; export * from "./domain/model.js"; @@ -16,6 +17,7 @@ export * from "./logging/session-metrics.js"; 
export * from "./logging/structured-logger.js"; export * from "./observability/dashboard-server.js"; export * from "./orchestrator/core.js"; +export * from "./orchestrator/gate-handler.js"; export * from "./orchestrator/runtime-host.js"; export * from "./workspace/hooks.js"; export * from "./tracker/errors.js"; @@ -23,5 +25,8 @@ export * from "./tracker/linear-client.js"; export * from "./tracker/linear-normalize.js"; export * from "./tracker/linear-queries.js"; export * from "./tracker/tracker.js"; +export * from "./runners/index.js"; export * from "./workspace/path-safety.js"; export * from "./workspace/workspace-manager.js"; +export * from "./slack-bot/index.js"; +export * from "./version.js"; diff --git a/src/logging/fields.ts b/src/logging/fields.ts index f566b176..12b5d78d 100644 --- a/src/logging/fields.ts +++ b/src/logging/fields.ts @@ -18,11 +18,30 @@ export const LOG_FIELDS = [ "input_tokens", "output_tokens", "total_tokens", + "cache_read_tokens", + "cache_write_tokens", + "no_cache_tokens", + "reasoning_tokens", "rate_limit_requests_remaining", "rate_limit_tokens_remaining", + "stage_name", + "turns_used", "duration_ms", "seconds_running", "error_code", + "turn_number", + "prompt_chars", + "estimated_prompt_tokens", + "workers_aborted", + "timed_out", + "dispatched_count", + "running_count", + "reconciled_stop_requests", + "total_input_tokens", + "total_output_tokens", + "total_cache_read_tokens", + "total_cache_write_tokens", + "turn_count", ] as const; export type LogField = (typeof LOG_FIELDS)[number]; diff --git a/src/logging/format-timestamp.ts b/src/logging/format-timestamp.ts new file mode 100644 index 00000000..989d5878 --- /dev/null +++ b/src/logging/format-timestamp.ts @@ -0,0 +1,65 @@ +/** + * Format a Date as an ISO-like string in US Eastern time. 
+ * Output: "2026-03-21T14:45:00.000-04:00" (or -05:00 in EST) + */ +export function formatEasternTimestamp(date: Date = new Date()): string { + if (!Number.isFinite(date.getTime())) { + return "n/a"; + } + // Get the date/time components in Eastern time + const formatter = new Intl.DateTimeFormat("en-CA", { + timeZone: "America/New_York", + year: "numeric", + month: "2-digit", + day: "2-digit", + hour: "2-digit", + minute: "2-digit", + second: "2-digit", + hour12: false, + }); + + const parts = formatter.formatToParts(date); + const get = (type: string) => parts.find((p) => p.type === type)?.value || ""; + + const year = get("year"); + const month = get("month"); + const day = get("day"); + const hour = get("hour"); + const minute = get("minute"); + const second = get("second"); + + // Get milliseconds (not available from formatToParts) + const ms = String(date.getMilliseconds()).padStart(3, "0"); + + // Get the timezone offset + const offset = getEasternOffset(date); + + return `${year}-${month}-${day}T${hour}:${minute}:${second}.${ms}${offset}`; +} + +/** + * Get the UTC offset for US Eastern time at the given date. + * Returns format like "-04:00" (EDT) or "-05:00" (EST) + */ +function getEasternOffset(date: Date): string { + const formatter = new Intl.DateTimeFormat("en-US", { + timeZone: "America/New_York", + timeZoneName: "shortOffset", + }); + + const parts = formatter.formatToParts(date); + const offsetPart = parts.find((p) => p.type === "timeZoneName"); + + // offsetPart.value is like "GMT-4" or "GMT-5" + const match = offsetPart?.value?.match(/GMT([+-]?\d+)/); + if (!match?.[1]) { + // Fallback to EST if we can't parse + return "-05:00"; + } + + const hours = Number.parseInt(match[1], 10); + const sign = hours <= 0 ? 
"-" : "+"; + const absHours = Math.abs(hours); + + return `${sign}${String(absHours).padStart(2, "0")}:00`; +} diff --git a/src/logging/runtime-snapshot.ts b/src/logging/runtime-snapshot.ts index d74789d8..aec4d8a9 100644 --- a/src/logging/runtime-snapshot.ts +++ b/src/logging/runtime-snapshot.ts @@ -2,24 +2,47 @@ import type { CodexRateLimits, CodexTotals, OrchestratorState, + RecentActivityEntry, + StageRecord, + TurnHistoryEntry, } from "../domain/model.js"; +import { formatEasternTimestamp } from "./format-timestamp.js"; import { getAggregateSecondsRunning } from "./session-metrics.js"; +export type HealthStatus = "green" | "yellow" | "red"; + export interface RuntimeSnapshotRunningRow { issue_id: string; issue_identifier: string; + issue_title: string; state: string; + pipeline_stage: string | null; + activity_summary: string | null; session_id: string | null; turn_count: number; last_event: string | null; last_message: string | null; started_at: string; + first_dispatched_at: string; last_event_at: string | null; + stage_duration_seconds: number; + tokens_per_turn: number; tokens: { input_tokens: number; output_tokens: number; total_tokens: number; + cache_read_tokens: number; + cache_write_tokens: number; + reasoning_tokens: number; }; + rework_count?: number; + total_pipeline_tokens: number; + execution_history: StageRecord[]; + turn_history: TurnHistoryEntry[]; + recent_activity: RecentActivityEntry[]; + last_tool_call: string | null; + health: HealthStatus; + health_reason: string | null; } export interface RuntimeSnapshotRetryRow { @@ -35,6 +58,8 @@ export interface RuntimeSnapshot { counts: { running: number; retrying: number; + completed: number; + failed: number; }; running: RuntimeSnapshotRunningRow[]; retrying: RuntimeSnapshotRetryRow[]; @@ -60,22 +85,70 @@ export function buildRuntimeSnapshot( .sort((left, right) => left.identifier.localeCompare(right.identifier, "en"), ) - .map((entry) => ({ - issue_id: entry.issue.id, - issue_identifier: 
entry.identifier, - state: entry.issue.state, - session_id: entry.sessionId, - turn_count: entry.turnCount, - last_event: entry.lastCodexEvent, - last_message: entry.lastCodexMessage, - started_at: entry.startedAt, - last_event_at: entry.lastCodexTimestamp, - tokens: { - input_tokens: entry.codexInputTokens, - output_tokens: entry.codexOutputTokens, - total_tokens: entry.codexTotalTokens, - }, - })); + .map((entry) => { + const reworkCount = state.issueReworkCounts[entry.issue.id] ?? 0; + const startedAtMs = Date.parse(entry.startedAt); + const stageDurationSeconds = Number.isFinite(startedAtMs) + ? Math.max(0, (now.getTime() - startedAtMs) / 1000) + : 0; + const tokensPerTurn = + entry.turnCount > 0 ? entry.totalStageTotalTokens / entry.turnCount : 0; + const executionHistory = + state.issueExecutionHistory[entry.issue.id] ?? []; + const completedStageTokens = executionHistory.reduce( + (sum, stage) => sum + stage.totalTokens, + 0, + ); + const totalPipelineTokens = + completedStageTokens + entry.totalStageTotalTokens; + const pipelineStage = state.issueStages[entry.issue.id] ?? null; + const { health, health_reason } = classifyHealth( + entry.lastCodexTimestamp, + tokensPerTurn, + now, + pipelineStage, + ); + const row: RuntimeSnapshotRunningRow = { + issue_id: entry.issue.id, + issue_identifier: entry.identifier, + issue_title: entry.issue.title, + state: entry.issue.state, + pipeline_stage: pipelineStage, + activity_summary: entry.lastCodexMessage, + session_id: entry.sessionId, + turn_count: entry.turnCount, + last_event: entry.lastCodexEvent, + last_message: entry.lastCodexMessage, + started_at: entry.startedAt, + first_dispatched_at: + state.issueFirstDispatchedAt[entry.issue.id] ?? entry.startedAt, + last_event_at: + entry.lastCodexTimestamp !== null + ? 
formatEasternTimestamp(new Date(entry.lastCodexTimestamp)) + : null, + stage_duration_seconds: stageDurationSeconds, + tokens_per_turn: tokensPerTurn, + tokens: { + input_tokens: entry.totalStageInputTokens, + output_tokens: entry.totalStageOutputTokens, + total_tokens: entry.totalStageTotalTokens, + cache_read_tokens: entry.totalStageCacheReadTokens, + cache_write_tokens: entry.totalStageCacheWriteTokens, + reasoning_tokens: entry.codexReasoningTokens, + }, + total_pipeline_tokens: totalPipelineTokens, + execution_history: executionHistory, + turn_history: entry.turnHistory, + recent_activity: entry.recentActivity, + last_tool_call: deriveLastToolCall(entry.recentActivity), + health, + health_reason, + }; + if (reworkCount > 0) { + row.rework_count = reworkCount; + } + return row; + }); const retrying = Object.values(state.retryAttempts) .slice() @@ -84,15 +157,17 @@ export function buildRuntimeSnapshot( issue_id: entry.issueId, issue_identifier: entry.identifier, attempt: entry.attempt, - due_at: new Date(entry.dueAtMs).toISOString(), + due_at: formatEasternTimestamp(new Date(entry.dueAtMs)), error: entry.error, })); return { - generated_at: now.toISOString(), + generated_at: formatEasternTimestamp(now), counts: { running: running.length, retrying: retrying.length, + completed: state.completed.size, + failed: state.failed.size, }, running, retrying, @@ -104,6 +179,15 @@ export function buildRuntimeSnapshot( }; } +function deriveLastToolCall( + recentActivity: RecentActivityEntry[], +): string | null { + if (recentActivity.length === 0) return null; + const last = recentActivity[recentActivity.length - 1]; + if (last === undefined) return null; + return last.context ? `${last.toolName} ${last.context}` : last.toolName; +} + function toSnapshotCodexTotals( totals: CodexTotals, secondsRunning: number, @@ -115,3 +199,59 @@ function toSnapshotCodexTotals( seconds_running: secondsRunning, }; } + +/** Per-stage default stall thresholds in seconds. 
*/ +export const STAGE_STALL_THRESHOLDS: Record<string, number> = { + investigate: 600, + implement: 480, + review: 600, + merge: 300, +}; + +const DEFAULT_STALL_THRESHOLD_SECONDS = 480; +const HIGH_TOKEN_BURN_THRESHOLD = 20_000; + +export function getStallThreshold(stageName: string | null): number { + if (stageName !== null && stageName in STAGE_STALL_THRESHOLDS) { + return STAGE_STALL_THRESHOLDS[stageName] ?? DEFAULT_STALL_THRESHOLD_SECONDS; + } + return DEFAULT_STALL_THRESHOLD_SECONDS; +} + +function classifyHealth( + lastEventAt: string | null, + tokensPerTurn: number, + now: Date, + stageName: string | null, +): { health: HealthStatus; health_reason: string | null } { + if (lastEventAt !== null) { + const lastEventMs = Date.parse(lastEventAt); + if (Number.isFinite(lastEventMs)) { + const secondsSinceEvent = (now.getTime() - lastEventMs) / 1000; + const threshold = getStallThreshold(stageName); + const stageLabel = stageName ?? "unknown"; + + if (secondsSinceEvent > threshold * 0.8) { + return { + health: "red", + health_reason: `stalled: no activity for ${Math.floor(secondsSinceEvent)}s (${stageLabel} stage, threshold ${threshold}s)`, + }; + } + if (secondsSinceEvent > threshold * 0.5) { + return { + health: "yellow", + health_reason: `slow: no activity for ${Math.floor(secondsSinceEvent)}s (${stageLabel} stage, threshold ${threshold}s)`, + }; + } + } + } + + if (tokensPerTurn > HIGH_TOKEN_BURN_THRESHOLD) { + return { + health: "yellow", + health_reason: `high token burn: ${Math.round(tokensPerTurn).toLocaleString("en-US")} tokens/turn`, + }; + } + + return { health: "green", health_reason: null }; +} diff --git a/src/logging/session-metrics.ts b/src/logging/session-metrics.ts index 9f069fe6..de7a6a1d 100644 --- a/src/logging/session-metrics.ts +++ b/src/logging/session-metrics.ts @@ -1,10 +1,16 @@ +import * as path from "node:path"; import type { CodexClientEvent } from "../codex/app-server-client.js"; import type { LiveSession, OrchestratorState, + 
RecentActivityEntry, RunningEntry, + TurnHistoryEntry, } from "../domain/model.js"; +const TURN_HISTORY_MAX_SIZE = 50; +const RECENT_ACTIVITY_MAX_SIZE = 10; + const SESSION_EVENT_MESSAGES: Partial< Record<CodexClientEvent["event"], string> > = Object.freeze({ @@ -20,12 +26,17 @@ const SESSION_EVENT_MESSAGES: Partial< notification: "notification", other_message: "other message", malformed: "malformed event", + activity_heartbeat: "activity heartbeat", }); export interface SessionTelemetryUpdateResult { inputTokensDelta: number; outputTokensDelta: number; totalTokensDelta: number; + cacheReadTokensDelta: number; + cacheWriteTokensDelta: number; + noCacheTokensDelta: number; + reasoningTokensDelta: number; rateLimitsUpdated: boolean; } @@ -48,7 +59,43 @@ export function applyCodexEventToSession( session.lastCodexMessage = summarizeCodexEvent(event); if (event.event === "session_started") { + // Push previous turn summary to ring buffer before resetting counters + if (session.turnCount > 0) { + const entry: TurnHistoryEntry = { + turnNumber: session.turnCount, + timestamp: event.timestamp, + message: session.lastCodexMessage, + inputTokens: session.codexInputTokens, + outputTokens: session.codexOutputTokens, + totalTokens: session.codexTotalTokens, + cacheReadTokens: session.codexCacheReadTokens, + reasoningTokens: session.codexReasoningTokens, + event: session.lastCodexEvent, + }; + session.turnHistory.push(entry); + if (session.turnHistory.length > TURN_HISTORY_MAX_SIZE) { + session.turnHistory.splice( + 0, + session.turnHistory.length - TURN_HISTORY_MAX_SIZE, + ); + } + } session.turnCount += 1; + // Reset per-turn absolute counters so the next turn's deltas accumulate from 0 + session.lastReportedInputTokens = 0; + session.lastReportedOutputTokens = 0; + session.lastReportedTotalTokens = 0; + } + + const activityEntry = buildRecentActivityEntry(event); + if (activityEntry !== null) { + session.recentActivity.push(activityEntry); + if (session.recentActivity.length 
> RECENT_ACTIVITY_MAX_SIZE) { + session.recentActivity.splice( + 0, + session.recentActivity.length - RECENT_ACTIVITY_MAX_SIZE, + ); + } } if (event.usage === undefined) { @@ -56,6 +103,10 @@ export function applyCodexEventToSession( inputTokensDelta: 0, outputTokensDelta: 0, totalTokensDelta: 0, + cacheReadTokensDelta: 0, + cacheWriteTokensDelta: 0, + noCacheTokensDelta: 0, + reasoningTokensDelta: 0, rateLimitsUpdated: event.rateLimits !== undefined, }; } @@ -77,9 +128,37 @@ export function applyCodexEventToSession( totalTokens, ); + const cacheReadTokensDelta = + event.usage.cacheReadTokens !== undefined + ? normalizeAbsoluteCounter(event.usage.cacheReadTokens) + : 0; + const cacheWriteTokensDelta = + event.usage.cacheWriteTokens !== undefined + ? normalizeAbsoluteCounter(event.usage.cacheWriteTokens) + : 0; + const noCacheTokensDelta = + event.usage.noCacheTokens !== undefined + ? normalizeAbsoluteCounter(event.usage.noCacheTokens) + : 0; + const reasoningTokensDelta = + event.usage.reasoningTokens !== undefined + ? 
normalizeAbsoluteCounter(event.usage.reasoningTokens) + : 0; + session.codexInputTokens = inputTokens; session.codexOutputTokens = outputTokens; session.codexTotalTokens = totalTokens; + session.codexCacheReadTokens += cacheReadTokensDelta; + session.codexCacheWriteTokens += cacheWriteTokensDelta; + session.codexNoCacheTokens += noCacheTokensDelta; + session.codexReasoningTokens += reasoningTokensDelta; + session.codexTotalInputTokens += inputTokensDelta; + session.codexTotalOutputTokens += outputTokensDelta; + session.totalStageInputTokens += inputTokensDelta; + session.totalStageOutputTokens += outputTokensDelta; + session.totalStageTotalTokens += totalTokensDelta; + session.totalStageCacheReadTokens += cacheReadTokensDelta; + session.totalStageCacheWriteTokens += cacheWriteTokensDelta; session.lastReportedInputTokens = inputTokens; session.lastReportedOutputTokens = outputTokens; session.lastReportedTotalTokens = totalTokens; @@ -88,6 +167,10 @@ export function applyCodexEventToSession( inputTokensDelta, outputTokensDelta, totalTokensDelta, + cacheReadTokensDelta, + cacheWriteTokensDelta, + noCacheTokensDelta, + reasoningTokensDelta, rateLimitsUpdated: event.rateLimits !== undefined, }; } @@ -102,6 +185,10 @@ export function applyCodexEventToOrchestratorState( state.codexTotals.inputTokens += result.inputTokensDelta; state.codexTotals.outputTokens += result.outputTokensDelta; state.codexTotals.totalTokens += result.totalTokensDelta; + state.codexTotals.cacheReadTokens += result.cacheReadTokensDelta; + state.codexTotals.cacheWriteTokens += result.cacheWriteTokensDelta; + state.codexTotals.noCacheTokens += result.noCacheTokensDelta; + state.codexTotals.reasoningTokens += result.reasoningTokensDelta; if (event.rateLimits !== undefined) { state.codexRateLimits = event.rateLimits; @@ -182,3 +269,253 @@ function normalizeAbsoluteCounter(value: number): number { function roundSeconds(value: number): number { return Math.round(value * 1000) / 1000; } + +/** + * Extract 
the tool name from a raw JSON-RPC message object. + * Duplicates the extraction logic from app-server-client.ts (which is private). + */ +function extractNestedString( + source: Record<string, unknown>, + keyPath: readonly string[], +): string | null { + let current: unknown = source; + for (const segment of keyPath) { + if ( + current === null || + typeof current !== "object" || + Array.isArray(current) + ) { + return null; + } + current = (current as Record<string, unknown>)[segment]; + } + if (typeof current === "string" && current.trim().length > 0) { + return current.trim(); + } + return null; +} + +export function extractToolNameFromRaw( + raw: Record<string, unknown>, +): string | null { + const candidates = [ + extractNestedString(raw, ["params", "toolName"]), + extractNestedString(raw, ["params", "name"]), + extractNestedString(raw, ["params", "tool", "name"]), + extractNestedString(raw, ["name"]), + ]; + return candidates.find((v) => v !== null) ?? null; +} + +export function extractToolInputFromRaw(raw: Record<string, unknown>): unknown { + const params = + raw.params !== null && + typeof raw.params === "object" && + !Array.isArray(raw.params) + ? (raw.params as Record<string, unknown>) + : null; + + if (params === null) { + return undefined; + } + + const candidates = [ + params.input, + params.arguments, + params.args, + params.payload, + params.toolInput, + ]; + + for (const candidate of candidates) { + if (candidate !== undefined) { + return candidate; + } + } + + return undefined; +} + +const NOTIFICATION_CONTEXT_MAX_LENGTH = 80; +const BASH_COMMAND_MAX_LENGTH = 60; + +/** + * Build a RecentActivityEntry from a CodexClientEvent, or return null if the + * event type should not produce an activity entry. 
+ */ +function buildRecentActivityEntry( + event: CodexClientEvent, +): RecentActivityEntry | null { + // Tool-call events: extract tool name + context from raw payload + if ( + (event.event === "approval_auto_approved" || + event.event === "unsupported_tool_call") && + event.raw != null + ) { + const raw = + typeof event.raw === "object" && !Array.isArray(event.raw) + ? (event.raw as Record<string, unknown>) + : null; + if (raw !== null) { + const toolName = extractToolNameFromRaw(raw); + if (toolName !== null) { + const toolInput = extractToolInputFromRaw(raw); + const context = buildActivityContext(toolName, toolInput); + const entry: RecentActivityEntry = { + timestamp: event.timestamp, + toolName, + context, + }; + if (event.usage !== undefined) { + const total = normalizeAbsoluteCounter(event.usage.totalTokens); + if (total > 0) { + entry.totalTokens = total; + } + } + return entry; + } + } + return null; + } + + // Turn outcome events: show turn result with optional token count + if (event.event === "turn_completed" || event.event === "turn_failed") { + const label = + event.event === "turn_completed" ? 
"Turn completed" : "Turn failed"; + const entry: RecentActivityEntry = { + timestamp: event.timestamp, + toolName: label, + context: null, + }; + if (event.usage !== undefined) { + const total = normalizeAbsoluteCounter(event.usage.totalTokens); + if (total > 0) { + entry.totalTokens = total; + } + } + return entry; + } + + // Session started + if (event.event === "session_started") { + return { + timestamp: event.timestamp, + toolName: "Session started", + context: null, + }; + } + + // Notification: use event.message (truncated) as context + if (event.event === "notification") { + let context: string | null = null; + if (event.message !== undefined && event.message.trim().length > 0) { + const trimmed = event.message.trim(); + if (trimmed.length <= NOTIFICATION_CONTEXT_MAX_LENGTH) { + context = trimmed; + } else { + context = `${trimmed.slice(0, NOTIFICATION_CONTEXT_MAX_LENGTH)}…`; + } + } + return { timestamp: event.timestamp, toolName: "Notification", context }; + } + + return null; +} + +/** + * Add a pipeline-level (non-CC) activity entry to a session's recentActivity. + * Used by the orchestrator to record stage transitions, state changes, and + * session start events so the activity feed is never empty. 
+ */ +export function addPipelineActivity( + session: LiveSession, + eventType: string, + description: string, +): void { + const entry: RecentActivityEntry = { + timestamp: new Date().toISOString(), + toolName: eventType, + context: description, + }; + session.recentActivity.push(entry); + if (session.recentActivity.length > RECENT_ACTIVITY_MAX_SIZE) { + session.recentActivity.splice( + 0, + session.recentActivity.length - RECENT_ACTIVITY_MAX_SIZE, + ); + } +} + +export function buildActivityContext( + toolName: string, + toolInput: unknown, +): string | null { + if ( + toolInput === null || + toolInput === undefined || + typeof toolInput !== "object" || + Array.isArray(toolInput) + ) { + return null; + } + + const input = toolInput as Record<string, unknown>; + const normalized = toolName.toLowerCase(); + + // File tools: Read, Edit, Write, Glob — extract file_path or pattern, take basename + if ( + normalized === "read" || + normalized === "edit" || + normalized === "write" + ) { + const filePath = + typeof input.file_path === "string" ? input.file_path : null; + if (filePath !== null && filePath.trim().length > 0) { + return path.basename(filePath.trim()); + } + return null; + } + + if (normalized === "glob") { + const pattern = typeof input.pattern === "string" ? input.pattern : null; + if (pattern !== null && pattern.trim().length > 0) { + return pattern.trim(); + } + return null; + } + + // Bash: extract command and truncate + if (normalized === "bash") { + const command = typeof input.command === "string" ? input.command : null; + if (command !== null && command.trim().length > 0) { + const trimmed = command.trim(); + if (trimmed.length <= BASH_COMMAND_MAX_LENGTH) { + return trimmed; + } + return `${trimmed.slice(0, BASH_COMMAND_MAX_LENGTH)}…`; + } + return null; + } + + // Grep: extract pattern + if (normalized === "grep") { + const pattern = typeof input.pattern === "string" ? 
input.pattern : null; + if (pattern !== null && pattern.trim().length > 0) { + return pattern.trim(); + } + return null; + } + + // Unknown tools: extract first string-valued argument, truncated to 60 chars + for (const value of Object.values(input)) { + if (typeof value === "string" && value.trim().length > 0) { + const trimmed = value.trim(); + if (trimmed.length <= BASH_COMMAND_MAX_LENGTH) { + return trimmed; + } + return `${trimmed.slice(0, BASH_COMMAND_MAX_LENGTH)}…`; + } + } + + return null; +} diff --git a/src/logging/structured-logger.ts b/src/logging/structured-logger.ts index aee750a9..88e38d7e 100644 --- a/src/logging/structured-logger.ts +++ b/src/logging/structured-logger.ts @@ -1,6 +1,7 @@ import type { Writable } from "node:stream"; import type { LogField } from "./fields.js"; +import { formatEasternTimestamp } from "./format-timestamp.js"; export type StructuredLogLevel = "debug" | "info" | "warn" | "error"; @@ -152,7 +153,7 @@ export function createStructuredLogEntry( now = new Date(), ): StructuredLogEntry { const merged: StructuredLogEntry = { - timestamp: now.toISOString(), + timestamp: formatEasternTimestamp(now), level: base.level, event: base.event, message: formatStructuredMessage(base.event, base.message, context), diff --git a/src/observability/dashboard-render.ts b/src/observability/dashboard-render.ts index bab5b7a0..1242246a 100644 --- a/src/observability/dashboard-render.ts +++ b/src/observability/dashboard-render.ts @@ -1,10 +1,12 @@ import type { RuntimeSnapshot } from "../logging/runtime-snapshot.js"; +import { getDisplayVersion } from "../version.js"; import { escapeHtml, formatInteger, formatRuntimeAndTurns, formatRuntimeSeconds, prettyValue, + runtimeSecondsFromStartedAt, stateBadgeClass, } from "./dashboard-format.js"; @@ -14,26 +16,26 @@ export interface DashboardRenderOptions { const DASHBOARD_STYLES = String.raw` :root { - color-scheme: light; - --page: #f7f7f8; - --page-soft: #fbfbfc; - --page-deep: #ececf1; - --card: 
rgba(255, 255, 255, 0.94); - --card-muted: #f3f4f6; - --ink: #202123; - --muted: #6e6e80; - --line: #ececf1; - --line-strong: #d9d9e3; + color-scheme: dark; + --page: #111113; + --page-soft: #161618; + --page-deep: #0c0c0e; + --card: rgba(28, 28, 32, 0.94); + --card-muted: #1e1e22; + --ink: #e8e8ec; + --muted: #8e8ea0; + --line: #2a2a30; + --line-strong: #3a3a42; --accent: #10a37f; - --accent-ink: #0f513f; - --accent-soft: #e8faf4; - --danger: #b42318; - --danger-soft: #fef3f2; - --warning: #8a5a00; - --warning-soft: #fff7e8; - --warning-line: #f1d8a6; - --shadow-sm: 0 1px 2px rgba(16, 24, 40, 0.05); - --shadow-lg: 0 20px 50px rgba(15, 23, 42, 0.08); + --accent-ink: #5fe0b8; + --accent-soft: rgba(16, 163, 127, 0.12); + --danger: #f87171; + --danger-soft: rgba(248, 113, 113, 0.1); + --warning: #fbbf24; + --warning-soft: rgba(251, 191, 36, 0.1); + --warning-line: rgba(251, 191, 36, 0.2); + --shadow-sm: 0 1px 2px rgba(0, 0, 0, 0.3); + --shadow-lg: 0 20px 50px rgba(0, 0, 0, 0.4); } * { box-sizing: border-box; @@ -45,8 +47,8 @@ const DASHBOARD_STYLES = String.raw` margin: 0; min-height: 100vh; background: - radial-gradient(circle at top, rgba(16, 163, 127, 0.12) 0%, rgba(16, 163, 127, 0) 30%), - linear-gradient(180deg, var(--page-soft) 0%, var(--page) 24%, #f3f4f6 100%); + radial-gradient(circle at top, rgba(16, 163, 127, 0.08) 0%, rgba(16, 163, 127, 0) 30%), + linear-gradient(180deg, var(--page-soft) 0%, var(--page) 24%, var(--page-deep) 100%); color: var(--ink); font-family: "Sohne", "SF Pro Text", "Helvetica Neue", "Segoe UI", sans-serif; line-height: 1.5; @@ -70,7 +72,7 @@ const DASHBOARD_STYLES = String.raw` font: inherit; font-weight: 600; letter-spacing: -0.01em; - box-shadow: 0 8px 20px rgba(16, 163, 127, 0.18); + box-shadow: 0 8px 20px rgba(16, 163, 127, 0.25); transition: transform 140ms ease, box-shadow 140ms ease, @@ -79,11 +81,11 @@ const DASHBOARD_STYLES = String.raw` } button:hover { transform: translateY(-1px); - box-shadow: 0 12px 24px rgba(16, 163, 
127, 0.22); + box-shadow: 0 12px 24px rgba(16, 163, 127, 0.3); } .subtle-button { border: 1px solid var(--line-strong); - background: rgba(255, 255, 255, 0.72); + background: rgba(255, 255, 255, 0.06); color: var(--muted); padding: 0.34rem 0.72rem; font-size: 0.82rem; @@ -93,7 +95,7 @@ const DASHBOARD_STYLES = String.raw` .subtle-button:hover { transform: none; box-shadow: none; - background: white; + background: rgba(255, 255, 255, 0.1); border-color: var(--muted); color: var(--ink); } @@ -120,7 +122,7 @@ const DASHBOARD_STYLES = String.raw` .section-card, .metric-card { background: var(--card); - border: 1px solid rgba(217, 217, 227, 0.82); + border: 1px solid var(--line); box-shadow: var(--shadow-sm); backdrop-filter: blur(18px); } @@ -184,7 +186,7 @@ const DASHBOARD_STYLES = String.raw` } .status-badge-live { background: var(--accent-soft); - border-color: rgba(16, 163, 127, 0.18); + border-color: rgba(16, 163, 127, 0.3); color: var(--accent-ink); } .metric-grid { @@ -301,7 +303,7 @@ const DASHBOARD_STYLES = String.raw` } .state-badge-active { background: var(--accent-soft); - border-color: rgba(16, 163, 127, 0.18); + border-color: rgba(16, 163, 127, 0.3); color: var(--accent-ink); } .state-badge-warning { @@ -311,16 +313,43 @@ const DASHBOARD_STYLES = String.raw` } .state-badge-danger { background: var(--danger-soft); - border-color: #f6d3cf; + border-color: rgba(248, 113, 113, 0.2); color: var(--danger); } + .health-badge { + display: inline-flex; + align-items: center; + gap: 0.3rem; + min-height: 1.85rem; + padding: 0.3rem 0.68rem; + border-radius: 999px; + border: 1px solid var(--line); + background: var(--card-muted); + color: var(--ink); + font-size: 0.8rem; + font-weight: 600; + line-height: 1; + } + .health-badge-dot { + display: inline-block; + width: 0.5rem; + height: 0.5rem; + border-radius: 50%; + background: var(--ink-muted); + } + .health-badge-green { background: var(--accent-soft); border-color: rgba(16, 163, 127, 0.3); color: 
var(--accent-ink); } + .health-badge-green .health-badge-dot { background: var(--accent); } + .health-badge-yellow { background: var(--warning-soft); border-color: var(--warning-line); color: var(--warning); } + .health-badge-yellow .health-badge-dot { background: var(--warning); } + .health-badge-red { background: var(--danger-soft); border-color: rgba(248, 113, 113, 0.2); color: var(--danger); } + .health-badge-red .health-badge-dot { background: var(--danger); } .issue-id { font-weight: 600; letter-spacing: -0.01em; } - .issue-link { - color: var(--muted); - font-size: 0.86rem; + .issue-title { + font-size: 0.84rem; + white-space: normal; } .muted { color: var(--muted); @@ -329,9 +358,9 @@ const DASHBOARD_STYLES = String.raw` margin-top: 1rem; padding: 1rem; border-radius: 18px; - background: #f5f5f7; + background: var(--page-deep); border: 1px solid var(--line); - color: #353740; + color: var(--ink); font-size: 0.9rem; white-space: pre-wrap; word-break: break-word; @@ -340,6 +369,125 @@ const DASHBOARD_STYLES = String.raw` margin: 1rem 0 0; color: var(--muted); } + .expand-toggle { + border: 1px solid var(--line-strong); + background: rgba(255, 255, 255, 0.06); + color: var(--muted); + border-radius: 4px; + padding: 0.18rem 0.48rem; + font-size: 0.78rem; + font-weight: 600; + letter-spacing: 0.01em; + box-shadow: none; + cursor: pointer; + transition: background 120ms ease, color 120ms ease; + margin-top: 0.3rem; + } + .expand-toggle:hover { + transform: none; + box-shadow: none; + background: rgba(255, 255, 255, 0.1); + border-color: var(--muted); + color: var(--ink); + } + .detail-row > td { + padding: 0; + border-top: none; + } + .detail-panel { + padding: 1rem 1.25rem; + background: var(--page-soft); + border-top: 1px solid var(--line); + border-bottom: 2px solid var(--line-strong); + } + .detail-grid { + display: grid; + grid-template-columns: repeat(auto-fit, minmax(220px, 1fr)); + gap: 1rem; + } + .detail-section { + min-width: 0; + } + 
.detail-section-title { + margin: 0 0 0.45rem; + font-size: 0.78rem; + font-weight: 700; + text-transform: uppercase; + letter-spacing: 0.05em; + color: var(--muted); + } + .detail-kv { + display: grid; + grid-template-columns: auto 1fr; + gap: 0.12rem 0.75rem; + font-size: 0.88rem; + } + .detail-kv-label { + color: var(--muted); + white-space: nowrap; + } + .detail-kv-value { + font-variant-numeric: tabular-nums slashed-zero; + font-feature-settings: "tnum" 1, "zero" 1; + } + .turn-timeline { + list-style: none; + margin: 0; + padding: 0; + font-size: 0.84rem; + max-height: 9rem; + overflow-y: auto; + } + .turn-timeline li { + display: grid; + grid-template-columns: 5.5rem 1fr auto; + gap: 0.3rem; + padding: 0.22rem 0; + border-top: 1px solid var(--line); + align-items: baseline; + } + .turn-timeline li:first-child { + border-top: none; + } + .turn-num { + color: var(--muted); + font-size: 0.78rem; + font-weight: 700; + text-align: left; + overflow: hidden; + text-overflow: ellipsis; + white-space: nowrap; + } + .turn-msg { + overflow: hidden; + text-overflow: ellipsis; + white-space: nowrap; + color: var(--ink); + } + .activity-time { + color: var(--muted); + font-size: 0.76rem; + white-space: nowrap; + } + .exec-history-table { + width: 100%; + border-collapse: collapse; + font-size: 0.84rem; + } + .exec-history-table th { + text-align: left; + padding: 0 0.4rem 0.35rem 0; + font-size: 0.74rem; + font-weight: 700; + text-transform: uppercase; + letter-spacing: 0.04em; + color: var(--muted); + } + .exec-history-table td { + padding: 0.2rem 0.4rem 0.2rem 0; + border-top: 1px solid var(--line); + vertical-align: top; + } @media (max-width: 860px) { .app-shell { padding: 1rem 0.85rem 2rem; @@ -364,6 +512,50 @@ const DASHBOARD_STYLES = String.raw` padding: 1rem; } } + .context-section { + display: flex; + flex-wrap: wrap; + gap: 0.35rem 1.25rem; + align-items: baseline; + margin-bottom: 0.75rem; + padding-bottom: 0.6rem; + border-bottom: 1px solid var(--line); + } + 
.context-item { + display: inline-flex; + align-items: baseline; + gap: 0.4rem; + font-size: 0.88rem; + } + .context-label { + color: var(--muted); + font-size: 0.72rem; + font-weight: 700; + text-transform: uppercase; + letter-spacing: 0.05em; + } + .context-value { + color: var(--ink); + } + .context-health-red { + color: var(--danger); + font-size: 0.86rem; + } + .context-health-yellow { + color: var(--warning); + font-size: 0.86rem; + } + .stage-badge { + display: inline-flex; + align-items: center; + padding: 0.18rem 0.5rem; + border-radius: 999px; + border: 1px solid rgba(16, 163, 127, 0.18); + background: var(--accent-soft); + color: var(--accent-ink); + font-size: 0.78rem; + font-weight: 600; + } `; export function renderDashboardHtml( @@ -394,7 +586,7 @@ ${DASHBOARD_STYLES} <header class="hero-card"> <div class="hero-grid"> <div> - <p class="eyebrow">Symphony Observability</p> + <p class="eyebrow">Symphony Observability — v${getDisplayVersion()}</p> <h1 class="hero-title">Operations Dashboard</h1> <p class="hero-copy"> Current state, retry pressure, token usage, and orchestration health for the active Symphony runtime. 
@@ -425,6 +617,18 @@ ${DASHBOARD_STYLES} <p class="metric-detail">Issues waiting for the next retry window.</p> </article> + <article class="metric-card"> + <p class="metric-label">Completed</p> + <p id="metric-completed" class="metric-value numeric">${snapshot.counts.completed}</p> + <p class="metric-detail">Issues that completed successfully.</p> + </article> + + <article class="metric-card"> + <p class="metric-label">Failed</p> + <p id="metric-failed" class="metric-value numeric">${snapshot.counts.failed}</p> + <p class="metric-detail">Issues whose final stage failed.</p> + </article> + <article class="metric-card"> <p class="metric-label">Total tokens</p> <p id="metric-total" class="metric-value numeric">${totalTokensLabel}</p> @@ -464,6 +668,7 @@ ${DASHBOARD_STYLES} <col style="width: 8rem;" /> <col style="width: 7.5rem;" /> <col style="width: 8.5rem;" /> + <col style="width: 7rem;" /> <col /> <col style="width: 10rem;" /> </colgroup> @@ -473,6 +678,7 @@ ${DASHBOARD_STYLES} <th>State</th> <th>Session</th> <th>Runtime / turns</th> + <th>Pipeline</th> <th>Codex update</th> <th>Tokens</th> </tr> @@ -570,6 +776,13 @@ function renderDashboardClientScript( return runtime; } + function formatPipelineTime(row, generatedAt) { + if (!row.first_dispatched_at || row.first_dispatched_at === row.started_at) { + return '\u2014'; + } + return formatRuntimeSeconds(runtimeSecondsFromStartedAt(row.first_dispatched_at, generatedAt)); + } + function stateBadgeClass(state) { const normalized = String(state || '').toLowerCase(); if (normalized.includes('progress') || normalized.includes('running') || normalized.includes('active')) { @@ -595,29 +808,132 @@ function renderDashboardClientScript( } } + function formatCompactTokens(tokens) { + if (tokens >= 1000000) { + return (tokens / 1000000).toFixed(1) + 'M'; + } + if (tokens >= 1000) { + return (tokens / 1000).toFixed(1) + 'k'; + } + return String(tokens); + } + + function renderOutcomeLabel(outcome) { + if (outcome === 'normal') 
return '<span style="color: var(--accent-ink)">normal</span>'; + if (outcome === 'failed_to_start') return '<span style="color: var(--danger)">failed to start</span>'; + if (outcome === 'timed_out') return '<span style="color: var(--warning)">timed out</span>'; + if (outcome === 'error') return '<span style="color: var(--danger)">error</span>'; + return escapeHtml(outcome); + } + + function renderDetailPanel(row, rowId) { + var contextItems = []; + if (row.pipeline_stage != null) { + contextItems.push('<span class="context-item"><span class="context-label">Stage</span> <span class="stage-badge">' + escapeHtml(row.pipeline_stage) + '</span></span>'); + } + if (row.activity_summary != null) { + contextItems.push('<span class="context-item"><span class="context-label">Doing</span> <span class="context-value">' + escapeHtml(row.activity_summary) + '</span></span>'); + } + if (row.health_reason != null) { + var healthClass = row.health === 'red' ? 'context-health-red' : 'context-health-yellow'; + contextItems.push('<span class="context-item"><span class="context-label">Health</span> <span class="' + healthClass + '">' + escapeHtml(row.health_reason) + '</span></span>'); + } + if (row.rework_count != null && row.rework_count > 0) { + contextItems.push('<span class="context-item"><span class="context-label">Rework</span> <span class="state-badge state-badge-warning">\xD7' + formatInteger(row.rework_count) + '</span></span>'); + } + var contextSection = contextItems.length > 0 ? 
'<div class="context-section">' + contextItems.join('') + '</div>' : ''; + + const tokenBreakdown = + '<div class="detail-section">' + + '<p class="detail-section-title">Token breakdown</p>' + + '<div class="detail-kv">' + + '<span class="detail-kv-label">Input</span><span class="detail-kv-value numeric">' + formatInteger(row.tokens && row.tokens.input_tokens) + '</span>' + + '<span class="detail-kv-label">Output</span><span class="detail-kv-value numeric">' + formatInteger(row.tokens && row.tokens.output_tokens) + '</span>' + + '<span class="detail-kv-label">Total</span><span class="detail-kv-value numeric">' + formatInteger(row.tokens && row.tokens.total_tokens) + '</span>' + + '<span class="detail-kv-label">Cache read</span><span class="detail-kv-value numeric">' + formatInteger(row.tokens && row.tokens.cache_read_tokens) + '</span>' + + '<span class="detail-kv-label">Cache write</span><span class="detail-kv-value numeric">' + formatInteger(row.tokens && row.tokens.cache_write_tokens) + '</span>' + + '<span class="detail-kv-label">Reasoning</span><span class="detail-kv-value numeric">' + formatInteger(row.tokens && row.tokens.reasoning_tokens) + '</span>' + + '<span class="detail-kv-label">Pipeline</span><span class="detail-kv-value numeric">' + formatInteger(row.total_pipeline_tokens) + '</span>' + + '</div></div>'; + + var displayActivity = (row.recent_activity || []).slice(-5); + const recentActivityItems = (displayActivity.length === 0) + ? (function () { + if (row.pipeline_stage != null) { + var startMs = Date.parse(row.started_at); + var elapsedSecs = isFinite(startMs) ? Math.max(0, Math.floor((Date.now() - startMs) / 1000)) : 0; + var agoLabel = elapsedSecs < 60 ? 
elapsedSecs + 's ago' : Math.floor(elapsedSecs / 60) + 'm ago'; + return '<li><span class="turn-num">' + escapeHtml(row.pipeline_stage) + '</span><span class="turn-msg muted">stage started</span><span class="activity-time">' + escapeHtml(agoLabel) + '</span></li>'; + } + return '<li><span class="turn-num">\u2014</span><span class="turn-msg muted">Waiting for agent activity...</span><span></span></li>'; + })() + : displayActivity.map(function (a) { + var ago = ''; + if (a.timestamp) { + var diffMs = Date.now() - new Date(a.timestamp).getTime(); + var secs = Math.max(0, Math.floor(diffMs / 1000)); + ago = secs < 60 ? secs + 's ago' : Math.floor(secs / 60) + 'm ago'; + } + var tokenLabel = (a.totalTokens != null && a.totalTokens > 0) ? ' \u00B7 ' + formatCompactTokens(a.totalTokens) : ''; + return '<li><span class="turn-num">' + escapeHtml(a.toolName) + '</span><span class="turn-msg" title="' + escapeHtml(a.context || '') + '">' + escapeHtml(a.context || '\u2014') + tokenLabel + '</span><span class="activity-time">' + escapeHtml(ago) + '</span></li>'; + }).join(''); + const recentActivity = + '<div class="detail-section">' + + '<p class="detail-section-title">Recent activity</p>' + + '<ul class="turn-timeline">' + recentActivityItems + '</ul>' + + '</div>'; + + const execRows = (!row.execution_history || row.execution_history.length === 0) + ? 
'<tr><td colspan="6" class="muted">No completed stages.</td></tr>' + : row.execution_history.map(function (s) { + return '<tr><td>' + escapeHtml(s.stageName) + '</td><td class="numeric">' + formatInteger(s.turns) + '</td><td class="numeric">' + formatInteger(s.totalTokens) + '</td><td class="numeric">' + formatInteger(s.inputTokens || 0) + '</td><td class="numeric">' + formatInteger(s.outputTokens || 0) + '</td><td>' + renderOutcomeLabel(s.outcome) + '</td></tr>'; + }).join(''); + const executionHistory = + '<div class="detail-section">' + + '<p class="detail-section-title">Execution history</p>' + + '<table class="exec-history-table"><thead><tr><th>Stage</th><th>Turns</th><th>Tokens</th><th>In</th><th>Out</th><th>Outcome</th></tr></thead>' + + '<tbody>' + execRows + '</tbody></table>' + + '</div>'; + + return '<div class="detail-panel">' + contextSection + '<div class="detail-grid">' + tokenBreakdown + recentActivity + executionHistory + '</div></div>'; + } + function renderRunningRows(next) { if (!next.running || next.running.length === 0) { - return '<tr><td colspan="6"><p class="empty-state">No active sessions.</p></td></tr>'; + return '<tr><td colspan="7"><p class="empty-state">No active sessions.</p></td></tr>'; } return next.running.map(function (row) { + const detailId = 'detail-' + String(row.issue_identifier).replace(/[^a-zA-Z0-9]/g, '-'); const sessionCell = row.session_id ? '<button type="button" class="subtle-button" data-label="Copy ID" data-copy="' + escapeHtml(row.session_id) + '" onclick="navigator.clipboard.writeText(this.dataset.copy); this.textContent = \\'Copied\\'; clearTimeout(this._copyTimer); this._copyTimer = setTimeout(() => { this.textContent = this.dataset.label }, 1200);">Copy ID</button>' : '<span class="muted">n/a</span>'; - const message = row.last_message || row.last_event || 'n/a'; const eventMeta = row.last_event ? escapeHtml(row.last_event) + (row.last_event_at ? 
' · <span class="mono numeric">' + escapeHtml(row.last_event_at) + '</span>' : '') : 'n/a'; - return '<tr>' + - '<td><div class="issue-stack"><span class="issue-id">' + escapeHtml(row.issue_identifier) + '</span><a class="issue-link" href="/api/v1/' + encodeURIComponent(row.issue_identifier) + '">JSON details</a></div></td>' + - '<td><span class="' + stateBadgeClass(row.state) + '">' + escapeHtml(row.state) + '</span></td>' + + const reworkHtml = (row.rework_count != null && row.rework_count > 0) + ? '<span class="state-badge state-badge-warning">Rework \xD7' + escapeHtml(row.rework_count) + '</span>' + : ''; + const healthLabel = row.health === 'red' ? '\uD83D\uDD34 Red' : row.health === 'yellow' ? '\uD83D\uDFE1 Yellow' : '\uD83D\uDFE2 Green'; + const healthClass = 'health-badge health-badge-' + (row.health || 'green'); + const healthTitle = row.health_reason ? ' title="' + escapeHtml(row.health_reason) + '"' : ''; + const healthHtml = '<span class="' + healthClass + '"' + healthTitle + '><span class="health-badge-dot"></span>' + escapeHtml(healthLabel) + '</span>'; + const activityText = row.last_tool_call || row.activity_summary || row.last_event || 'n/a'; + const expandToggle = '<button type="button" class="expand-toggle" aria-expanded="false" data-detail="' + escapeHtml(detailId) + '" onclick="const d=document.getElementById(this.dataset.detail);const open=this.getAttribute(\\'aria-expanded\\')=== \\'true\\';d.style.display=open?\\'none\\':\\'table-row\\';this.setAttribute(\\'aria-expanded\\',String(!open));this.textContent=open?\\'\u25B6 Details\\':\\'\u25BC Details\\';">\u25B6 Details</button>'; + + const detailRow = '<tr id="' + escapeHtml(detailId) + '" class="detail-row" style="display:none;"><td colspan="7">' + renderDetailPanel(row, detailId) + '</td></tr>'; + + return '<tr class="session-row">' + + '<td><div class="issue-stack"><span class="issue-id">' + escapeHtml(row.issue_identifier) + '</span><span class="muted issue-title">' + 
escapeHtml(row.issue_title) + '</span>' + expandToggle + '</div></td>' + + '<td><div class="detail-stack"><span class="' + stateBadgeClass(row.state) + '">' + escapeHtml(row.state) + '</span>' + reworkHtml + healthHtml + '</div></td>' + '<td><div class="session-stack">' + sessionCell + '</div></td>' + '<td class="numeric">' + formatRuntimeAndTurns(row, next.generated_at) + '</td>' + - '<td><div class="detail-stack"><span class="event-text" title="' + escapeHtml(message) + '">' + escapeHtml(message) + '</span><span class="muted event-meta">' + eventMeta + '</span></div></td>' + - '<td><div class="token-stack numeric"><span>Total: ' + formatInteger(row.tokens?.total_tokens) + '</span><span class="muted">In ' + formatInteger(row.tokens?.input_tokens) + ' / Out ' + formatInteger(row.tokens?.output_tokens) + '</span></div></td>' + - '</tr>'; + '<td class="numeric">' + formatPipelineTime(row, next.generated_at) + '</td>' + + '<td><div class="detail-stack"><span class="event-text" title="' + escapeHtml(activityText) + '">' + escapeHtml(activityText) + '</span><span class="muted event-meta">' + eventMeta + '</span></div></td>' + + '<td><div class="token-stack numeric"><span>Total: ' + formatInteger(row.tokens && row.tokens.total_tokens) + '</span><span class="muted">In ' + formatInteger(row.tokens && row.tokens.input_tokens) + ' / Out ' + formatInteger(row.tokens && row.tokens.output_tokens) + '</span><span class="muted">' + formatInteger(row.tokens_per_turn) + ' / turn</span><span class="muted">Pipeline: ' + formatInteger(row.total_pipeline_tokens) + '</span></div></td>' + + '</tr>' + detailRow; }).join(''); } @@ -628,7 +944,7 @@ function renderDashboardClientScript( return next.retrying.map(function (row) { return '<tr>' + - '<td><div class="issue-stack"><span class="issue-id">' + escapeHtml(row.issue_identifier || row.issue_id) + '</span><a class="issue-link" href="/api/v1/' + encodeURIComponent(row.issue_identifier || row.issue_id) + '">JSON details</a></div></td>' + + 
'<td><div class="issue-stack"><span class="issue-id">' + escapeHtml(row.issue_identifier || row.issue_id) + '</span></div></td>' + '<td>' + escapeHtml(row.attempt) + '</td>' + '<td class="mono">' + escapeHtml(row.due_at || 'n/a') + '</td>' + '<td>' + escapeHtml(row.error || 'n/a') + '</td>' + @@ -650,10 +966,29 @@ function renderDashboardClientScript( document.getElementById('generated-at').textContent = 'Generated at ' + next.generated_at; document.getElementById('metric-running').textContent = String(next.counts.running); document.getElementById('metric-retrying').textContent = String(next.counts.retrying); + document.getElementById('metric-completed').textContent = String(next.counts.completed); + document.getElementById('metric-failed').textContent = String(next.counts.failed); document.getElementById('metric-total').textContent = formatInteger(next.codex_totals.total_tokens); document.getElementById('metric-total-detail').textContent = 'In ' + formatInteger(next.codex_totals.input_tokens) + ' / Out ' + formatInteger(next.codex_totals.output_tokens); document.getElementById('metric-runtime').textContent = formatRuntimeSeconds(next.codex_totals.seconds_running); + // Preserve expand/collapse state before DOM replacement (SYMPH-37) + var expandedIds = new Set(); + document.querySelectorAll('.expand-toggle[aria-expanded="true"]').forEach(function(btn) { + expandedIds.add(btn.getAttribute('data-detail')); + }); document.getElementById('running-rows').innerHTML = renderRunningRows(next); + // Restore expand state after DOM replacement + expandedIds.forEach(function(detailId) { + var btn = document.querySelector('.expand-toggle[data-detail="' + detailId + '"]'); + if (btn) { + var d = document.getElementById(detailId); + if (d) { + d.style.display = 'table-row'; + btn.setAttribute('aria-expanded', 'true'); + btn.textContent = '\u25BC Details'; + } + } + }); document.getElementById('retry-rows').innerHTML = renderRetryRows(next); 
document.getElementById('rate-limits').textContent = prettyValue(next.rate_limits); } @@ -682,23 +1017,41 @@ function renderDashboardClientScript( })();`; } +function formatPipelineTime( + firstDispatchedAt: string, + startedAt: string, + generatedAt: string, +): string { + if (firstDispatchedAt === startedAt) { + return "\u2014"; + } + const seconds = runtimeSecondsFromStartedAt(firstDispatchedAt, generatedAt); + return formatRuntimeSeconds(seconds); +} + function renderRunningRows(snapshot: RuntimeSnapshot): string { - return snapshot.running.length === 0 - ? '<tr><td colspan="6"><p class="empty-state">No active sessions.</p></td></tr>' - : snapshot.running - .map( - (row) => ` - <tr> + if (snapshot.running.length === 0) { + return '<tr><td colspan="7"><p class="empty-state">No active sessions.</p></td></tr>'; + } + return snapshot.running + .map((row) => { + const detailId = `detail-${row.issue_identifier.replace(/[^a-zA-Z0-9]/g, "-")}`; + const detailPanel = renderDetailPanel(row); + return ` + <tr class="session-row"> <td> <div class="issue-stack"> <span class="issue-id">${escapeHtml(row.issue_identifier)}</span> - <a class="issue-link" href="/api/v1/${encodeURIComponent( - row.issue_identifier, - )}">JSON details</a> + <span class="muted issue-title">${escapeHtml(row.issue_title)}</span> + <button type="button" class="expand-toggle" aria-expanded="false" data-detail="${escapeHtml(detailId)}" onclick="const d=document.getElementById(this.dataset.detail);const open=this.getAttribute('aria-expanded')==='true';d.style.display=open?'none':'table-row';this.setAttribute('aria-expanded',String(!open));this.textContent=open?'\u25B6 Details':'\u25BC Details';">▶ Details</button> </div> </td> <td> - <span class="${stateBadgeClass(row.state)}">${escapeHtml(row.state)}</span> + <div class="detail-stack"> + <span class="${stateBadgeClass(row.state)}">${escapeHtml(row.state)}</span> + ${row.rework_count !== undefined && row.rework_count > 0 ? 
`<span class="state-badge state-badge-warning">Rework ×${escapeHtml(row.rework_count)}</span>` : ""} + ${renderHealthBadge(row.health, row.health_reason)} + </div> </td> <td> <div class="session-stack"> @@ -716,12 +1069,23 @@ function renderRunningRows(snapshot: RuntimeSnapshot): string { row.turn_count, snapshot.generated_at, )}</td> + <td class="numeric">${formatPipelineTime( + row.first_dispatched_at, + row.started_at, + snapshot.generated_at, + )}</td> <td> <div class="detail-stack"> <span class="event-text" title="${escapeHtml( - row.last_message ?? row.last_event ?? "n/a", + row.last_tool_call ?? + row.activity_summary ?? + row.last_event ?? + "n/a", )}">${escapeHtml( - row.last_message ?? row.last_event ?? "n/a", + row.last_tool_call ?? + row.activity_summary ?? + row.last_event ?? + "n/a", )}</span> <span class="muted event-meta">${escapeHtml( row.last_event ?? "n/a", @@ -740,11 +1104,124 @@ function renderRunningRows(snapshot: RuntimeSnapshot): string { <span class="muted">In ${formatInteger( row.tokens.input_tokens, )} / Out ${formatInteger(row.tokens.output_tokens)}</span> + <span class="muted">${formatInteger(row.tokens_per_turn)} / turn</span> + <span class="muted">Pipeline: ${formatInteger(row.total_pipeline_tokens)}</span> </div> </td> - </tr>`, - ) - .join(""); + </tr> + <tr id="${escapeHtml(detailId)}" class="detail-row" style="display:none;"> + <td colspan="7">${detailPanel}</td> + </tr>`; + }) + .join(""); +} + +function renderDetailPanel(row: RuntimeSnapshot["running"][number]): string { + const contextItems: string[] = []; + + if (row.pipeline_stage !== null) { + contextItems.push( + `<span class="context-item"><span class="context-label">Stage</span> <span class="stage-badge">${escapeHtml(row.pipeline_stage)}</span></span>`, + ); + } + + if (row.activity_summary !== null) { + contextItems.push( + `<span class="context-item"><span class="context-label">Doing</span> <span class="context-value">${escapeHtml(row.activity_summary)}</span></span>`, 
+ ); + } + + if (row.health_reason !== null) { + const healthClass = + row.health === "red" ? "context-health-red" : "context-health-yellow"; + contextItems.push( + `<span class="context-item"><span class="context-label">Health</span> <span class="${healthClass}">${escapeHtml(row.health_reason)}</span></span>`, + ); + } + + if (row.rework_count !== undefined && row.rework_count > 0) { + contextItems.push( + `<span class="context-item"><span class="context-label">Rework</span> <span class="state-badge state-badge-warning">\u00D7${formatInteger(row.rework_count)}</span></span>`, + ); + } + + const contextSection = + contextItems.length > 0 + ? `<div class="context-section">${contextItems.join("")}</div>` + : ""; + + const tokenBreakdown = ` + <div class="detail-section"> + <p class="detail-section-title">Token breakdown</p> + <div class="detail-kv"> + <span class="detail-kv-label">Input</span><span class="detail-kv-value numeric">${formatInteger(row.tokens.input_tokens)}</span> + <span class="detail-kv-label">Output</span><span class="detail-kv-value numeric">${formatInteger(row.tokens.output_tokens)}</span> + <span class="detail-kv-label">Total</span><span class="detail-kv-value numeric">${formatInteger(row.tokens.total_tokens)}</span> + <span class="detail-kv-label">Cache read</span><span class="detail-kv-value numeric">${formatInteger(row.tokens.cache_read_tokens)}</span> + <span class="detail-kv-label">Cache write</span><span class="detail-kv-value numeric">${formatInteger(row.tokens.cache_write_tokens)}</span> + <span class="detail-kv-label">Reasoning</span><span class="detail-kv-value numeric">${formatInteger(row.tokens.reasoning_tokens)}</span> + <span class="detail-kv-label">Pipeline</span><span class="detail-kv-value numeric">${formatInteger(row.total_pipeline_tokens)}</span> + </div> + </div>`; + + const displayActivity = row.recent_activity.slice(-5); + const recentActivityRows = + displayActivity.length === 0 + ? 
(() => { + // Fallback: show stage-level status when session is active but no tool calls yet + if (row.pipeline_stage !== null) { + const startMs = Date.parse(row.started_at); + const elapsedSecs = Number.isFinite(startMs) + ? Math.max(0, Math.floor((Date.now() - startMs) / 1000)) + : 0; + const agoLabel = + elapsedSecs < 60 + ? `${elapsedSecs}s ago` + : `${Math.floor(elapsedSecs / 60)}m ago`; + return `<li><span class="turn-num">${escapeHtml(row.pipeline_stage)}</span><span class="turn-msg muted">stage started</span><span class="activity-time">${escapeHtml(agoLabel)}</span></li>`; + } + return '<li><span class="turn-num">\u2014</span><span class="turn-msg muted">Waiting for agent activity...</span><span></span></li>'; + })() + : displayActivity + .map((a) => { + const diffMs = Date.now() - new Date(a.timestamp).getTime(); + const secs = Math.max(0, Math.floor(diffMs / 1000)); + const ago = + secs < 60 ? `${secs}s ago` : `${Math.floor(secs / 60)}m ago`; + const tokenLabel = + a.totalTokens !== undefined && a.totalTokens > 0 + ? ` \u00B7 ${formatCompactTokens(a.totalTokens)}` + : ""; + return `<li><span class="turn-num">${escapeHtml(a.toolName)}</span><span class="turn-msg" title="${escapeHtml(a.context ?? "")}">${escapeHtml(a.context ?? "\u2014")}${tokenLabel}</span><span class="activity-time">${escapeHtml(ago)}</span></li>`; + }) + .join(""); + + const recentActivity = ` + <div class="detail-section"> + <p class="detail-section-title">Recent activity</p> + <ul class="turn-timeline">${recentActivityRows}</ul> + </div>`; + + const execHistoryRows = + row.execution_history.length === 0 + ? `<tr><td colspan="6" class="muted">No completed stages.</td></tr>` + : row.execution_history + .map( + (s) => + `<tr><td>${escapeHtml(s.stageName)}</td><td class="numeric">${formatInteger(s.turns)}</td><td class="numeric">${formatInteger(s.totalTokens)}</td><td class="numeric">${formatInteger(s.inputTokens ?? 0)}</td><td class="numeric">${formatInteger(s.outputTokens ?? 
0)}</td><td>${renderOutcomeLabel(s.outcome)}</td></tr>`, + ) + .join(""); + + const executionHistory = ` + <div class="detail-section"> + <p class="detail-section-title">Execution history</p> + <table class="exec-history-table"> + <thead><tr><th>Stage</th><th>Turns</th><th>Tokens</th><th>In</th><th>Out</th><th>Outcome</th></tr></thead> + <tbody>${execHistoryRows}</tbody> + </table> + </div>`; + + return `<div class="detail-panel">${contextSection}<div class="detail-grid">${tokenBreakdown}${recentActivity}${executionHistory}</div></div>`; } function renderRetryRows(snapshot: RuntimeSnapshot): string { @@ -757,9 +1234,6 @@ function renderRetryRows(snapshot: RuntimeSnapshot): string { <td> <div class="issue-stack"> <span class="issue-id">${escapeHtml(row.issue_identifier ?? row.issue_id)}</span> - <a class="issue-link" href="/api/v1/${encodeURIComponent( - row.issue_identifier ?? row.issue_id, - )}">JSON details</a> </div> </td> <td>${row.attempt}</td> @@ -769,3 +1243,44 @@ function renderRetryRows(snapshot: RuntimeSnapshot): string { ) .join(""); } + +function renderHealthBadge( + health: "green" | "yellow" | "red", + healthReason: string | null, +): string { + const label = + health === "red" + ? "🔴 Red" + : health === "yellow" + ? "🟡 Yellow" + : "🟢 Green"; + const cssClass = `health-badge health-badge-${health}`; + const title = + healthReason !== null ? 
` title="${escapeHtml(healthReason)}"` : ""; + return `<span class="${cssClass}"${title}><span class="health-badge-dot"></span>${escapeHtml(label)}</span>`; +} + +function formatCompactTokens(tokens: number): string { + if (tokens >= 1_000_000) { + return `${(tokens / 1_000_000).toFixed(1)}M`; + } + if (tokens >= 1_000) { + return `${(tokens / 1_000).toFixed(1)}k`; + } + return String(tokens); +} + +function renderOutcomeLabel(outcome: string): string { + switch (outcome) { + case "normal": + return '<span style="color: var(--accent-ink)">normal</span>'; + case "failed_to_start": + return '<span style="color: var(--danger)">failed to start</span>'; + case "timed_out": + return '<span style="color: var(--warning)">timed out</span>'; + case "error": + return '<span style="color: var(--danger)">error</span>'; + default: + return escapeHtml(outcome); + } +} diff --git a/src/observability/dashboard-server.ts b/src/observability/dashboard-server.ts index bf53c9fd..470d9f99 100644 --- a/src/observability/dashboard-server.ts +++ b/src/observability/dashboard-server.ts @@ -111,7 +111,7 @@ export interface DashboardServerInstance { } export function createDashboardServer(options: DashboardServerOptions): Server { - const hostname = options.hostname ?? "127.0.0.1"; + const hostname = options.hostname ?? "0.0.0.0"; const snapshotTimeoutMs = options.snapshotTimeoutMs ?? DEFAULT_SNAPSHOT_TIMEOUT_MS; const liveController = new DashboardLiveUpdatesController({ @@ -144,7 +144,7 @@ export async function startDashboardServer( }, ): Promise<DashboardServerInstance> { const server = createDashboardServer(options); - const hostname = options.hostname ?? "127.0.0.1"; + const hostname = options.hostname ?? 
"0.0.0.0"; await new Promise<void>((resolve, reject) => { server.once("error", reject); @@ -183,7 +183,7 @@ export function createDashboardRequestHandler( liveController?: DashboardLiveUpdatesController; }, ): (request: IncomingMessage, response: ServerResponse) => Promise<void> { - const hostname = options.hostname ?? "127.0.0.1"; + const hostname = options.hostname ?? "0.0.0.0"; const snapshotTimeoutMs = options.snapshotTimeoutMs ?? DEFAULT_SNAPSHOT_TIMEOUT_MS; const renderOptions: DashboardRenderOptions = { diff --git a/src/orchestrator/core.ts b/src/orchestrator/core.ts index bc4abfa2..1b73d686 100644 --- a/src/orchestrator/core.ts +++ b/src/orchestrator/core.ts @@ -3,26 +3,44 @@ import { validateDispatchConfig } from "../config/config-resolver.js"; import type { DispatchValidationResult, ResolvedWorkflowConfig, + StageDefinition, } from "../config/types.js"; import { + type FailureClass, type Issue, + type LiveSession, type OrchestratorState, type RetryEntry, type RunningEntry, + type StageRecord, createEmptyLiveSession, createInitialOrchestratorState, normalizeIssueState, + parseFailureSignal, } from "../domain/model.js"; +import { formatEasternTimestamp } from "../logging/format-timestamp.js"; import { addEndedSessionRuntime, + addPipelineActivity, applyCodexEventToOrchestratorState, } from "../logging/session-metrics.js"; import type { IssueStateSnapshot, IssueTracker } from "../tracker/tracker.js"; +import { + type EnsembleGateResult, + formatExecutionReport, + formatRebaseComment, + formatReviewFindingsComment, +} from "./gate-handler.js"; const CONTINUATION_RETRY_DELAY_MS = 1_000; const FAILURE_RETRY_BASE_DELAY_MS = 10_000; -export type WorkerExitOutcome = "normal" | "abnormal"; +export type WorkerExitOutcome = + | "normal" + | "abnormal" + | "failed_to_start" + | "timed_out" + | "error"; export type StopReason = "terminal_state" | "inactive_state" | "stall_timeout"; @@ -44,6 +62,7 @@ export interface PollTickResult { stopRequests: StopRequest[]; 
trackerFetchFailed: boolean; reconciliationFetchFailed: boolean; + runningCount: number; } export interface RetryTimerResult { @@ -70,6 +89,9 @@ export interface OrchestratorCoreOptions { spawnWorker: (input: { issue: Issue; attempt: number | null; + stage: StageDefinition | null; + stageName: string | null; + reworkCount: number; }) => Promise<SpawnWorkerResult> | SpawnWorkerResult; stopRunningIssue?: (input: { issueId: string; @@ -77,6 +99,20 @@ export interface OrchestratorCoreOptions { cleanupWorkspace: boolean; reason: StopReason; }) => Promise<void> | void; + runEnsembleGate?: (input: { + issue: Issue; + stage: StageDefinition; + }) => Promise<EnsembleGateResult>; + postComment?: (issueId: string, body: string) => Promise<void>; + updateIssueState?: ( + issueId: string, + issueIdentifier: string, + stateName: string, + ) => Promise<void>; + autoCloseParentIssue?: ( + issueId: string, + issueIdentifier: string, + ) => Promise<void>; timerScheduler?: TimerScheduler; now?: () => Date; } @@ -90,17 +126,40 @@ export class OrchestratorCore { private readonly stopRunningIssue?: OrchestratorCoreOptions["stopRunningIssue"]; + private readonly runEnsembleGate?: OrchestratorCoreOptions["runEnsembleGate"]; + + private readonly postComment?: OrchestratorCoreOptions["postComment"]; + + private readonly updateIssueState?: OrchestratorCoreOptions["updateIssueState"]; + + private readonly autoCloseParentIssue?: OrchestratorCoreOptions["autoCloseParentIssue"]; + private readonly timerScheduler: TimerScheduler; private readonly now: () => Date; private readonly state: OrchestratorState; + /** + * Snapshot of execution history captured after the final stage record is + * appended but before advanceStage() deletes issueExecutionHistory. + * This prevents the runtime-host from falling back to stale preHistory + * when a terminal transition clears the canonical history. 
+ */ + private readonly lastExitHistorySnapshot: Map< + string, + import("../domain/model.js").ExecutionHistory + > = new Map(); + constructor(options: OrchestratorCoreOptions) { this.config = options.config; this.tracker = options.tracker; this.spawnWorker = options.spawnWorker; this.stopRunningIssue = options.stopRunningIssue; + this.runEnsembleGate = options.runEnsembleGate; + this.postComment = options.postComment; + this.updateIssueState = options.updateIssueState; + this.autoCloseParentIssue = options.autoCloseParentIssue; this.timerScheduler = options.timerScheduler ?? defaultTimerScheduler(); this.now = options.now ?? (() => new Date()); this.state = createInitialOrchestratorState({ @@ -113,6 +172,21 @@ export class OrchestratorCore { return this.state; } + /** + * Retrieve and consume the execution history snapshot captured during the + * most recent onWorkerExit call for the given issue. Returns undefined if + * no snapshot exists (e.g., the exit did not append a stage record). + */ + consumeExitHistorySnapshot( + issueId: string, + ): import("../domain/model.js").ExecutionHistory | undefined { + const snapshot = this.lastExitHistorySnapshot.get(issueId); + if (snapshot !== undefined) { + this.lastExitHistorySnapshot.delete(issueId); + } + return snapshot; + } + updateConfig(config: ResolvedWorkflowConfig): void { this.config = config; this.syncStateFromConfig(); @@ -150,6 +224,22 @@ export class OrchestratorCore { return false; } + // Allow resumed issues: clear completed flag ONLY when a human has + // explicitly moved the issue to a resume-designated state ("Resume" or + // "Todo"). Issues still in operational states like "In Progress" or + // "In Review" stay completed — they haven't been deliberately requeued. + // Issues in the escalation state ("Blocked") also stay completed until + // a human explicitly moves them. 
+ if (this.state.completed.has(issue.id) || this.state.failed.has(issue.id)) { + const resumeStates: ReadonlySet<string> = new Set(["resume", "todo"]); + if (resumeStates.has(normalizedState)) { + this.state.completed.delete(issue.id); + this.state.failed.delete(issue.id); + } else { + return false; + } + } + const allowClaimedIssueId = options?.allowClaimedIssueId; if ( this.state.claimed.has(issue.id) && @@ -165,10 +255,6 @@ export class OrchestratorCore { return false; } - if (normalizedState !== "todo") { - return true; - } - return issue.blockedBy.every((blocker) => { const blockerState = blocker.state === null ? null : normalizeIssueState(blocker.state); @@ -188,6 +274,7 @@ export class OrchestratorCore { stopRequests: reconcileResult.stopRequests, trackerFetchFailed: false, reconciliationFetchFailed: reconcileResult.reconciliationFetchFailed, + runningCount: Object.keys(this.state.running).length, }; } @@ -201,6 +288,23 @@ export class OrchestratorCore { stopRequests: reconcileResult.stopRequests, trackerFetchFailed: true, reconciliationFetchFailed: reconcileResult.reconciliationFetchFailed, + runningCount: Object.keys(this.state.running).length, + }; + } + + // Check for pipeline-halt before dispatching + const haltIssue = await this.checkPipelineHalt(); + if (haltIssue !== null) { + console.warn( + `[orchestrator] Pipeline halted: ${haltIssue.identifier} — ${haltIssue.title}. 
Skipping all dispatch.`, + ); + return { + validation, + dispatchedIssueIds: [], + stopRequests: reconcileResult.stopRequests, + trackerFetchFailed: false, + reconciliationFetchFailed: reconcileResult.reconciliationFetchFailed, + runningCount: Object.keys(this.state.running).length, }; } @@ -226,6 +330,7 @@ export class OrchestratorCore { stopRequests: reconcileResult.stopRequests, trackerFetchFailed: false, reconciliationFetchFailed: reconcileResult.reconciliationFetchFailed, + runningCount: Object.keys(this.state.running).length, }; } @@ -239,6 +344,25 @@ export class OrchestratorCore { }; } + // Check for pipeline-halt before dispatching — fail-open on errors + const haltIssue = await this.checkPipelineHalt(); + if (haltIssue !== null) { + console.warn( + `[orchestrator] Pipeline halted: ${haltIssue.identifier} — ${haltIssue.title}. Deferring retry for ${retryEntry.identifier ?? issueId}.`, + ); + // Don't consume the retry attempt — reschedule at the same attempt number + this.clearRetryEntry(issueId); + return { + dispatched: false, + released: false, + retryEntry: this.scheduleRetry(issueId, retryEntry.attempt, { + identifier: retryEntry.identifier, + error: `pipeline halted: ${haltIssue.identifier}`, + delayType: retryEntry.delayType, + }), + }; + } + this.clearRetryEntry(issueId); let candidates: Issue[]; @@ -251,7 +375,7 @@ export class OrchestratorCore { retryEntry: this.scheduleRetry(issueId, retryEntry.attempt + 1, { identifier: retryEntry.identifier, error: "retry poll failed", - delayType: "failure", + delayType: retryEntry.delayType, }), }; } @@ -286,7 +410,7 @@ export class OrchestratorCore { retryEntry: this.scheduleRetry(issueId, retryEntry.attempt + 1, { identifier: issue.identifier, error: "no available orchestrator slots", - delayType: "failure", + delayType: retryEntry.delayType, }), }; } @@ -304,6 +428,7 @@ export class OrchestratorCore { outcome: WorkerExitOutcome; reason?: string; endedAt?: Date; + agentMessage?: string; }): RetryEntry | 
null { const runningEntry = this.state.running[input.issueId]; if (runningEntry === undefined) { @@ -311,14 +436,63 @@ export class OrchestratorCore { } delete this.state.running[input.issueId]; - addEndedSessionRuntime( - this.state, - runningEntry.startedAt, - input.endedAt ?? this.now(), + const endedAt = input.endedAt ?? this.now(); + addEndedSessionRuntime(this.state, runningEntry.startedAt, endedAt); + + // Classify "abnormal" into a more descriptive outcome for stage records + const classifiedOutcome = classifyExitOutcome( + input.outcome, + runningEntry.turnCount, + input.reason, ); + // Append a StageRecord to execution history for this completed stage. + const stageName = this.state.issueStages[input.issueId]; + if (stageName !== undefined) { + const stageRecord: StageRecord = { + stageName, + durationMs: endedAt.getTime() - Date.parse(runningEntry.startedAt), + totalTokens: runningEntry.totalStageTotalTokens, + inputTokens: runningEntry.totalStageInputTokens, + outputTokens: runningEntry.totalStageOutputTokens, + turns: runningEntry.turnCount, + outcome: classifiedOutcome, + }; + let history = this.state.issueExecutionHistory[input.issueId]; + if (history === undefined) { + history = []; + this.state.issueExecutionHistory[input.issueId] = history; + } + history.push(stageRecord); + + // Snapshot history after the push so runtime-host can read it even if + // advanceStage() deletes issueExecutionHistory for terminal transitions. 
+ this.lastExitHistorySnapshot.set(input.issueId, [...history]); + } + if (input.outcome === "normal") { - this.state.completed.add(input.issueId); + const failureSignal = parseFailureSignal(input.agentMessage); + if (failureSignal !== null) { + return this.handleFailureSignal( + input.issueId, + runningEntry, + failureSignal.failureClass, + input.agentMessage, + ); + } + + const transition = this.advanceStage( + input.issueId, + runningEntry.identifier, + runningEntry, + ); + if (transition === "completed") { + this.state.completed.add(input.issueId); + this.releaseClaim(input.issueId); + return null; + } + + // Stage advanced or no stages configured — schedule continuation return this.scheduleRetry(input.issueId, 1, { identifier: runningEntry.identifier, error: null, @@ -337,6 +511,677 @@ export class OrchestratorCore { ); } + /** + * Advance issue to next stage based on transition rules. + * Returns "completed" if the issue reached a terminal stage, + * "advanced" if it moved to the next stage, or "unchanged" if + * no stages are configured. + * + * When reaching a terminal stage that has a linearState configured, + * fires updateIssueState as a best-effort side effect so the + * tracker reflects the final state (e.g., "Done"). 
+ */ + private advanceStage( + issueId: string, + issueIdentifier: string, + session?: LiveSession, + ): "completed" | "advanced" | "unchanged" { + const stagesConfig = this.config.stages; + if (stagesConfig === null) { + return "unchanged"; + } + + const currentStageName = this.state.issueStages[issueId]; + if (currentStageName === undefined) { + return "unchanged"; + } + + const currentStage = stagesConfig.stages[currentStageName]; + if (currentStage === undefined) { + return "unchanged"; + } + + const nextStageName = currentStage.transitions.onComplete; + if (nextStageName === null) { + // No on_complete transition — treat as terminal + delete this.state.issueStages[issueId]; + delete this.state.issueReworkCounts[issueId]; + delete this.state.issueExecutionHistory[issueId]; + delete this.state.issueFirstDispatchedAt[issueId]; + return "completed"; + } + + const nextStage = stagesConfig.stages[nextStageName]; + if (nextStage === undefined) { + // Invalid target — treat as terminal + delete this.state.issueStages[issueId]; + delete this.state.issueReworkCounts[issueId]; + delete this.state.issueExecutionHistory[issueId]; + delete this.state.issueFirstDispatchedAt[issueId]; + return "completed"; + } + + if (nextStage.type === "terminal") { + // Post execution report before cleanup (best-effort) + if (nextStage.linearState !== null && this.postComment !== undefined) { + const history = this.state.issueExecutionHistory[issueId] ?? []; + const reworkCount = this.state.issueReworkCounts[issueId] ?? 
0; + const report = formatExecutionReport( + issueIdentifier, + history, + reworkCount, + ); + void this.postComment(issueId, report).catch((err) => { + console.warn( + `[orchestrator] Failed to post execution report for ${issueIdentifier}:`, + err, + ); + }); + } + delete this.state.issueStages[issueId]; + delete this.state.issueReworkCounts[issueId]; + delete this.state.issueExecutionHistory[issueId]; + delete this.state.issueFirstDispatchedAt[issueId]; + // Fire linearState update for the terminal stage (e.g., move to "Done") + if ( + nextStage.linearState !== null && + this.updateIssueState !== undefined + ) { + void this.updateIssueState( + issueId, + issueIdentifier, + nextStage.linearState, + ).catch((err) => { + console.warn( + `[orchestrator] Failed to update terminal state for ${issueIdentifier}:`, + err, + ); + }); + } + // Best-effort: check if all sibling sub-issues are terminal and auto-close parent + if (this.autoCloseParentIssue !== undefined) { + void this.autoCloseParentIssue(issueId, issueIdentifier).catch( + (err) => { + console.warn( + `[orchestrator] Failed to auto-close parent for ${issueIdentifier}:`, + err, + ); + }, + ); + } + return "completed"; + } + + // Move to the next stage + this.state.issueStages[issueId] = nextStageName; + if (session !== undefined) { + addPipelineActivity( + session, + "stage_transition", + `Stage → ${nextStageName}`, + ); + } + return "advanced"; + } + + /** + * Handle agent-reported failure signals parsed from output. + * Routes to retry, rework, or escalation based on failure class. 
+ */ + private handleFailureSignal( + issueId: string, + runningEntry: RunningEntry, + failureClass: FailureClass, + agentMessage: string | undefined, + ): RetryEntry | null { + if (failureClass === "spec") { + // Spec failures are unrecoverable — escalate immediately + this.state.failed.add(issueId); + this.releaseClaim(issueId); + delete this.state.issueStages[issueId]; + delete this.state.issueReworkCounts[issueId]; + delete this.state.issueExecutionHistory[issueId]; + delete this.state.issueFirstDispatchedAt[issueId]; + void this.fireEscalationSideEffects( + issueId, + runningEntry.identifier, + "Agent reported unrecoverable spec failure. Escalating for manual review.", + ); + return null; + } + + if (failureClass === "verify" || failureClass === "infra") { + // Retryable failures — use existing exponential backoff + return this.scheduleRetry( + issueId, + nextRetryAttempt(runningEntry.retryAttempt), + { + identifier: runningEntry.identifier, + error: `agent reported failure: ${failureClass}`, + delayType: "failure", + }, + ); + } + + if (failureClass === "rebase") { + // Rebase failures — trigger rework if onRework configured, else retry + return this.handleRebaseFailure(issueId, runningEntry, agentMessage); + } + + // failureClass === "review" — trigger rework via gate lookup + return this.handleReviewFailure(issueId, runningEntry, agentMessage); + } + + /** + * Handle review failure: find the downstream gate and use its rework target. + * Falls back to retry if no gate or rework target is found. + * Posts a review findings comment before triggering rework. 
+ */ + private handleReviewFailure( + issueId: string, + runningEntry: RunningEntry, + agentMessage: string | undefined, + ): RetryEntry | null { + const stagesConfig = this.config.stages; + if (stagesConfig === null) { + // No stages — fall back to retry + return this.scheduleRetry( + issueId, + nextRetryAttempt(runningEntry.retryAttempt), + { + identifier: runningEntry.identifier, + error: "agent reported failure: review", + delayType: "failure", + }, + ); + } + + const currentStageName = this.state.issueStages[issueId]; + if (currentStageName === undefined) { + return this.scheduleRetry( + issueId, + nextRetryAttempt(runningEntry.retryAttempt), + { + identifier: runningEntry.identifier, + error: "agent reported failure: review", + delayType: "failure", + }, + ); + } + + // Check if the current stage itself has onRework (agent-type review stages) + const currentStage = stagesConfig.stages[currentStageName]; + if ( + currentStage !== undefined && + currentStage.type === "agent" && + currentStage.transitions.onRework !== null + ) { + // Use reworkGate directly — it now supports agent stages with onRework + const reworkTarget = this.reworkGate(issueId); + if (reworkTarget === "escalated") { + void this.fireEscalationSideEffects( + issueId, + runningEntry.identifier, + "Agent review failure: max rework attempts exceeded. 
Escalating for manual review.", + ); + return null; + } + if (reworkTarget !== null) { + this.postReviewFindingsComment( + issueId, + runningEntry.identifier, + currentStageName, + agentMessage, + ); + return this.scheduleRetry(issueId, 1, { + identifier: runningEntry.identifier, + error: `agent review failure: rework to ${reworkTarget}`, + delayType: "continuation", + }); + } + // reworkTarget === null should not happen since we checked onRework !== null, + // but fall through to downstream gate search just in case + } + + // Walk from current stage's onComplete to find the next gate + const gateName = this.findDownstreamGate(currentStageName); + if (gateName === null) { + return this.scheduleRetry( + issueId, + nextRetryAttempt(runningEntry.retryAttempt), + { + identifier: runningEntry.identifier, + error: "agent reported failure: review", + delayType: "failure", + }, + ); + } + + // Use the gate's rework logic (reuses reworkGate by temporarily setting stage) + // biome-ignore lint/style/noNonNullAssertion: issueId is guaranteed to exist in issueStages at this point + const savedStage = this.state.issueStages[issueId]!; + this.state.issueStages[issueId] = gateName; + let reworkTarget: string | "escalated" | null; + try { + reworkTarget = this.reworkGate(issueId); + } catch (err) { + this.state.issueStages[issueId] = savedStage; + throw err; + } + if (reworkTarget === null) { + // No rework target — restore and fall back to retry + this.state.issueStages[issueId] = savedStage; + return this.scheduleRetry( + issueId, + nextRetryAttempt(runningEntry.retryAttempt), + { + identifier: runningEntry.identifier, + error: + "agent reported failure: review (no rework target on downstream gate)", + delayType: "failure", + }, + ); + } + + if (reworkTarget === "escalated") { + // reworkGate already cleaned up state — fire escalation side effects + void this.fireEscalationSideEffects( + issueId, + runningEntry.identifier, + "Agent review failure: max rework attempts exceeded. 
Escalating for manual review.", + ); + return null; + } + + // Rework target set by reworkGate — post findings and schedule continuation + this.postReviewFindingsComment( + issueId, + runningEntry.identifier, + currentStageName, + agentMessage, + ); + return this.scheduleRetry(issueId, 1, { + identifier: runningEntry.identifier, + error: `agent review failure: rework to ${reworkTarget}`, + delayType: "continuation", + }); + } + + /** + * Post a review findings comment as a best-effort side effect. + * Uses void...catch pattern to never affect pipeline flow. + */ + private postReviewFindingsComment( + issueId: string, + issueIdentifier: string, + stageName: string, + agentMessage: string | undefined, + ): void { + if (this.postComment === undefined) { + return; + } + const comment = formatReviewFindingsComment( + issueIdentifier, + stageName, + agentMessage ?? "", + ); + void this.postComment(issueId, comment).catch((err) => { + console.warn( + `[orchestrator] Failed to post review findings comment for ${issueIdentifier}:`, + err, + ); + }); + } + + /** + * Handle rebase failure: check current stage for onRework and trigger rework. + * Mirrors the first half of handleReviewFailure() — checks the current stage + * has onRework, calls reworkGate(), posts a rebase comment, and schedules + * a continuation retry. Falls back to retryable failure if no onRework. 
+ */ + private handleRebaseFailure( + issueId: string, + runningEntry: RunningEntry, + agentMessage: string | undefined, + ): RetryEntry | null { + const stagesConfig = this.config.stages; + if (stagesConfig === null) { + return this.scheduleRetry( + issueId, + nextRetryAttempt(runningEntry.retryAttempt), + { + identifier: runningEntry.identifier, + error: "agent reported failure: rebase", + delayType: "failure", + }, + ); + } + + const currentStageName = this.state.issueStages[issueId]; + if (currentStageName === undefined) { + return this.scheduleRetry( + issueId, + nextRetryAttempt(runningEntry.retryAttempt), + { + identifier: runningEntry.identifier, + error: "agent reported failure: rebase", + delayType: "failure", + }, + ); + } + + const currentStage = stagesConfig.stages[currentStageName]; + if ( + currentStage !== undefined && + currentStage.type === "agent" && + currentStage.transitions.onRework !== null + ) { + const reworkTarget = this.reworkGate(issueId); + if (reworkTarget === "escalated") { + void this.fireEscalationSideEffects( + issueId, + runningEntry.identifier, + "Rebase failure: max rework attempts exceeded. Escalating for manual review.", + ); + return null; + } + if (reworkTarget !== null) { + this.postRebaseComment( + issueId, + runningEntry.identifier, + currentStageName, + agentMessage, + ); + return this.scheduleRetry(issueId, 1, { + identifier: runningEntry.identifier, + error: `rebase failure: rework to ${reworkTarget}`, + delayType: "continuation", + }); + } + } + + // No onRework configured — fall back to retryable failure + return this.scheduleRetry( + issueId, + nextRetryAttempt(runningEntry.retryAttempt), + { + identifier: runningEntry.identifier, + error: "agent reported failure: rebase", + delayType: "failure", + }, + ); + } + + /** + * Post a rebase comment as a best-effort side effect. + * Uses void...catch pattern to never affect pipeline flow. 
+ */ + private postRebaseComment( + issueId: string, + issueIdentifier: string, + stageName: string, + agentMessage: string | undefined, + ): void { + if (this.postComment === undefined) { + return; + } + const comment = formatRebaseComment( + issueIdentifier, + stageName, + agentMessage ?? "", + ); + void this.postComment(issueId, comment).catch((err) => { + console.warn( + `[orchestrator] Failed to post rebase comment for ${issueIdentifier}:`, + err, + ); + }); + } + + /** + * Walk from a stage's onComplete transition to find the next gate stage. + * Returns the gate stage name or null if none found. + */ + private findDownstreamGate(startStageName: string): string | null { + const stagesConfig = this.config.stages; + if (stagesConfig === null) { + return null; + } + + const visited = new Set<string>(); + let current = startStageName; + + while (!visited.has(current)) { + visited.add(current); + const stage = stagesConfig.stages[current]; + if (stage === undefined) { + return null; + } + + const next = stage.transitions.onComplete; + if (next === null) { + return null; + } + + const nextStage = stagesConfig.stages[next]; + if (nextStage === undefined) { + return null; + } + + if (nextStage.type === "gate") { + return next; + } + + // Agent-type stages with onRework can also serve as rework gates + if ( + nextStage.type === "agent" && + nextStage.transitions.onRework !== null + ) { + return next; + } + + current = next; + } + + return null; + } + + /** + * Fire escalation side effects (updateIssueState + postComment). + * Best-effort: failures are logged, not propagated. 
+ */ + private async fireEscalationSideEffects( + issueId: string, + issueIdentifier: string, + comment: string, + ): Promise<void> { + if ( + this.config.escalationState !== null && + this.updateIssueState !== undefined + ) { + try { + await this.updateIssueState( + issueId, + issueIdentifier, + this.config.escalationState, + ); + } catch (err) { + console.warn( + `[orchestrator] Failed to update escalation state for ${issueIdentifier}:`, + err, + ); + } + } + if (this.postComment !== undefined) { + try { + await this.postComment(issueId, comment); + } catch (err) { + console.warn( + `[orchestrator] Failed to post escalation comment for ${issueIdentifier}:`, + err, + ); + } + } + } + + /** + * Run ensemble gate: spawn reviewers, aggregate, transition. + * Called asynchronously from dispatchIssue for ensemble gates. + */ + private async handleEnsembleGate( + issue: Issue, + stage: StageDefinition, + ): Promise<void> { + try { + // biome-ignore lint/style/noNonNullAssertion: runEnsembleGate is guaranteed to be set when this method is called + const result = await this.runEnsembleGate!({ issue, stage }); + + if (result.aggregate === "pass") { + const nextStage = this.approveGate(issue.id); + if (nextStage !== null) { + this.scheduleRetry(issue.id, 1, { + identifier: issue.identifier, + error: null, + delayType: "continuation", + }); + } + } else { + const reworkTarget = this.reworkGate(issue.id); + if (reworkTarget !== null && reworkTarget !== "escalated") { + this.scheduleRetry(issue.id, 1, { + identifier: issue.identifier, + error: `Ensemble review failed: ${result.comment.slice(0, 200)}`, + delayType: "continuation", + }); + } else if (reworkTarget === "escalated") { + if ( + this.config.escalationState !== null && + this.updateIssueState !== undefined + ) { + try { + await this.updateIssueState( + issue.id, + issue.identifier, + this.config.escalationState, + ); + } catch (err) { + console.warn( + `[orchestrator] Failed to update escalation state for 
${issue.identifier}:`, + err, + ); + } + } + if (this.postComment !== undefined) { + const maxRework = + stage.type === "gate" ? (stage.maxRework ?? 0) : 0; + try { + await this.postComment( + issue.id, + `Ensemble review: max rework attempts (${maxRework}) exceeded. Escalating for manual review.`, + ); + } catch (err) { + // Comment posting is best-effort — don't fail the gate on it. + console.warn( + `[orchestrator] Failed to post escalation comment for ${issue.identifier}:`, + err, + ); + } + } + } + } + } catch { + // Gate handler failure — release claim so the issue can be retried on next poll. + this.releaseClaim(issue.id); + } + } + + /** + * Handle gate approval: advance to on_approve target. + * Returns the next stage name, or null if already terminal/invalid. + */ + approveGate(issueId: string): string | null { + const stagesConfig = this.config.stages; + if (stagesConfig === null) { + return null; + } + + const currentStageName = this.state.issueStages[issueId]; + if (currentStageName === undefined) { + return null; + } + + const currentStage = stagesConfig.stages[currentStageName]; + if (currentStage === undefined || currentStage.type !== "gate") { + return null; + } + + const nextStageName = currentStage.transitions.onApprove; + if (nextStageName === null) { + return null; + } + + this.state.issueStages[issueId] = nextStageName; + return nextStageName; + } + + /** + * Handle gate rework: send issue back to rework target. + * Tracks rework count and escalates to terminal if max exceeded. + * Works for both gate-type stages and agent-type stages with onRework set. + * Returns the rework target stage name, "escalated" if max rework + * exceeded, or null if no rework transition defined. 
+ */ + reworkGate(issueId: string): string | "escalated" | null { + const stagesConfig = this.config.stages; + if (stagesConfig === null) { + return null; + } + + const currentStageName = this.state.issueStages[issueId]; + if (currentStageName === undefined) { + return null; + } + + const currentStage = stagesConfig.stages[currentStageName]; + if (currentStage === undefined) { + return null; + } + + // Allow gate stages (always) and agent stages with onRework set + if ( + currentStage.type !== "gate" && + !( + currentStage.type === "agent" && + currentStage.transitions.onRework !== null + ) + ) { + return null; + } + + const reworkTarget = currentStage.transitions.onRework; + if (reworkTarget === null) { + return null; + } + + const maxRework = currentStage.maxRework ?? Number.POSITIVE_INFINITY; + const currentCount = this.state.issueReworkCounts[issueId] ?? 0; + + if (currentCount >= maxRework) { + // Exceeded max rework — escalate to completed/terminal + delete this.state.issueStages[issueId]; + delete this.state.issueReworkCounts[issueId]; + delete this.state.issueExecutionHistory[issueId]; + delete this.state.issueFirstDispatchedAt[issueId]; + this.state.failed.add(issueId); + this.releaseClaim(issueId); + return "escalated"; + } + + this.state.issueReworkCounts[issueId] = currentCount + 1; + this.state.issueStages[issueId] = reworkTarget; + return reworkTarget; + } + onCodexEvent(input: { issueId: string; event: CodexClientEvent; @@ -350,6 +1195,52 @@ export class OrchestratorCore { return { applied: true }; } + /** + * Check if any non-terminal pipeline-halt issues exist. + * Prefers fetchOpenIssuesByLabels (server-side filtering) when available, + * falls back to fetchIssuesByLabels with client-side filtering. + * Returns the first open halt issue, or null if none / on error (fail-open). 
+ */ + private async checkPipelineHalt(): Promise<Issue | null> { + if (this.tracker.fetchOpenIssuesByLabels !== undefined) { + try { + const haltIssues = await this.tracker.fetchOpenIssuesByLabels( + ["pipeline-halt"], + this.config.tracker.terminalStates, + ); + return haltIssues[0] ?? null; + } catch (error) { + console.warn( + "[orchestrator] fetchOpenIssuesByLabels failed, falling back to fetchIssuesByLabels.", + error, + ); + } + } + + if (this.tracker.fetchIssuesByLabels !== undefined) { + try { + const haltIssues = await this.tracker.fetchIssuesByLabels([ + "pipeline-halt", + ]); + const terminalStates = toNormalizedStateSet( + this.config.tracker.terminalStates, + ); + const openHaltIssue = haltIssues.find((haltIssue) => { + const normalizedState = normalizeIssueState(haltIssue.state); + return !terminalStates.has(normalizedState); + }); + return openHaltIssue ?? null; + } catch (error) { + console.warn( + "[orchestrator] Failed to check for pipeline-halt issues. Continuing dispatch.", + error, + ); + } + } + + return null; + } + private syncStateFromConfig(): void { this.state.pollIntervalMs = this.config.polling.intervalMs; this.state.maxConcurrentAgents = this.config.agent.maxConcurrentAgents; @@ -411,17 +1302,127 @@ export class OrchestratorCore { issue: Issue, attempt: number | null, ): Promise<boolean> { + const stagesConfig = this.config.stages; + let stage: StageDefinition | null = null; + let stageName: string | null = null; + + if (stagesConfig !== null) { + const cachedStage = this.state.issueStages[issue.id]; + if (cachedStage !== undefined) { + stageName = cachedStage; + } else if ( + stagesConfig.fastTrack != null && + issue.labels.includes(stagesConfig.fastTrack.label) + ) { + stageName = stagesConfig.fastTrack.initialStage; + console.log( + `[orchestrator] Fast-tracking ${issue.identifier} to ${stageName} (label: ${stagesConfig.fastTrack.label})`, + ); + } else { + stageName = stagesConfig.initialStage; + } + stage = 
stagesConfig.stages[stageName] ?? null; + + if (stage !== null && stage.type === "terminal") { + this.state.completed.add(issue.id); + this.releaseClaim(issue.id); + delete this.state.issueStages[issue.id]; + delete this.state.issueReworkCounts[issue.id]; + delete this.state.issueFirstDispatchedAt[issue.id]; + // Fire linearState update for the terminal stage (e.g., move to "Done") + if (stage.linearState !== null && this.updateIssueState !== undefined) { + void this.updateIssueState( + issue.id, + issue.identifier, + stage.linearState, + ).catch((err) => { + console.warn( + `[orchestrator] Failed to update terminal state for ${issue.identifier}:`, + err, + ); + }); + } + return false; + } + + if (stage !== null && stage.type === "gate") { + this.state.issueStages[issue.id] = stageName; + this.state.claimed.add(issue.id); + + if (stage.linearState !== null && this.updateIssueState !== undefined) { + try { + await this.updateIssueState( + issue.id, + issue.identifier, + stage.linearState, + ); + } catch (err) { + console.warn( + `[orchestrator] Failed to update issue state for ${issue.identifier}:`, + err, + ); + } + } + + if ( + stage.gateType === "ensemble" && + this.runEnsembleGate !== undefined + ) { + // Fire ensemble gate asynchronously — resolve transitions on completion. + void this.handleEnsembleGate(issue, stage); + } + // Human gates (or ensemble gates without handler): stay in gate state. 
+ return false; + } + + // Track the issue's current stage + this.state.issueStages[issue.id] = stageName; + + if ( + stage?.linearState !== null && + stage?.linearState !== undefined && + this.updateIssueState !== undefined + ) { + try { + await this.updateIssueState( + issue.id, + issue.identifier, + stage.linearState, + ); + } catch (err) { + console.warn( + `[orchestrator] Failed to update issue state for ${issue.identifier}:`, + err, + ); + } + } + } + + if (!this.state.issueFirstDispatchedAt[issue.id]) { + this.state.issueFirstDispatchedAt[issue.id] = formatEasternTimestamp( + this.now(), + ); + } + try { - const spawned = await this.spawnWorker({ issue, attempt }); - this.state.running[issue.id] = { + const reworkCount = this.state.issueReworkCounts[issue.id] ?? 0; + const spawned = await this.spawnWorker({ + issue, + attempt, + stage, + stageName, + reworkCount, + }); + const runEntry: RunningEntry = { ...createEmptyLiveSession(), issue, identifier: issue.identifier, retryAttempt: normalizeRetryAttempt(attempt), - startedAt: this.now().toISOString(), + startedAt: formatEasternTimestamp(this.now()), workerHandle: spawned.workerHandle, monitorHandle: spawned.monitorHandle, }; + this.state.running[issue.id] = runEntry; this.state.claimed.add(issue.id); this.clearRetryEntry(issue.id); return true; @@ -472,9 +1473,11 @@ export class OrchestratorCore { const normalizedState = normalizeIssueState(snapshot.state); if (terminalStates.has(normalizedState)) { - stopRequests.push( - await this.requestStop(runningEntry, true, "terminal_state"), - ); + if (!this.isWorkerInFinalActiveStage(snapshot.id)) { + stopRequests.push( + await this.requestStop(runningEntry, true, "terminal_state"), + ); + } continue; } @@ -514,6 +1517,43 @@ export class OrchestratorCore { }; } + /** + * Returns true if the worker for the given issue is in the final active + * stage — i.e., its onComplete target is null or points to a terminal stage. 
+ * In that case, the worker itself drove the issue to terminal state and + * should be allowed to finish gracefully rather than being stopped. + */ + private isWorkerInFinalActiveStage(issueId: string): boolean { + const stagesConfig = this.config.stages; + if (stagesConfig === null) { + return false; + } + + const currentStageName = this.state.issueStages[issueId]; + if (currentStageName === undefined) { + // Stage already cleaned up by advanceStage (completed) — the worker + // is finishing its final stage. Allow it to complete gracefully. + return true; + } + + const currentStage = stagesConfig.stages[currentStageName]; + if (currentStage === undefined) { + return false; + } + + const nextStageName = currentStage.transitions.onComplete; + if (nextStageName === null) { + return true; + } + + const nextStage = stagesConfig.stages[nextStageName]; + if (nextStage === undefined) { + return false; + } + + return nextStage.type === "terminal"; + } + private async reconcileStalledRuns(): Promise<StopRequest[]> { if (this.config.codex.stallTimeoutMs <= 0) { return []; @@ -570,7 +1610,26 @@ export class OrchestratorCore { error: string | null; delayType: "continuation" | "failure"; }, - ): RetryEntry { + ): RetryEntry | null { + // Max retry guard — only applies to failure retries, not continuations + if ( + input.delayType === "failure" && + attempt > this.config.agent.maxRetryAttempts + ) { + this.state.failed.add(issueId); + this.releaseClaim(issueId); + delete this.state.issueStages[issueId]; + delete this.state.issueReworkCounts[issueId]; + delete this.state.issueExecutionHistory[issueId]; + delete this.state.issueFirstDispatchedAt[issueId]; + void this.fireEscalationSideEffects( + issueId, + input.identifier ?? issueId, + `Max retry attempts (${this.config.agent.maxRetryAttempts}) exceeded. 
Escalating for manual review.`, + ); + return null; + } + this.clearRetryEntry(issueId); const delayMs = @@ -592,6 +1651,7 @@ export class OrchestratorCore { dueAtMs, timerHandle, error: input.error, + delayType: input.delayType, }; this.state.claimed.add(issueId); @@ -688,6 +1748,32 @@ function toNormalizedStateSet(states: readonly string[]): Set<string> { return new Set(states.map((state) => normalizeIssueState(state))); } +export function classifyExitOutcome( + outcome: WorkerExitOutcome, + turnCount: number, + reason: string | undefined, +): string { + if (outcome === "normal") { + return "normal"; + } + // Already classified — pass through + if ( + outcome === "failed_to_start" || + outcome === "timed_out" || + outcome === "error" + ) { + return outcome; + } + // Classify "abnormal" based on context + if (turnCount === 0) { + return "failed_to_start"; + } + if (reason?.includes("stall_timeout")) { + return "timed_out"; + } + return "error"; +} + function defaultTimerScheduler(): TimerScheduler { return { set(callback, delayMs) { diff --git a/src/orchestrator/gate-handler.ts b/src/orchestrator/gate-handler.ts new file mode 100644 index 00000000..c8ecdac6 --- /dev/null +++ b/src/orchestrator/gate-handler.ts @@ -0,0 +1,485 @@ +import { execFileSync } from "node:child_process"; + +import type { AgentRunnerCodexClient } from "../agent/runner.js"; +import type { CodexTurnResult } from "../codex/app-server-client.js"; +import type { ReviewerDefinition, StageDefinition } from "../config/types.js"; +import type { ExecutionHistory, Issue } from "../domain/model.js"; +import { getDisplayVersion } from "../version.js"; + +/** + * Known rate-limit / quota-exhaustion phrases that may appear in reviewer + * output when the model returns a 200 with an error body instead of throwing. + * Checked case-insensitively against raw output in parseReviewerOutput. 
+ */ +export const RATE_LIMIT_PATTERNS: readonly string[] = [ + "you have exhausted your capacity", + "resource has been exhausted", + "rate limit", + "quota exceeded", +]; + +/** + * Single reviewer verdict — the minimal JSON layer of the two-layer output. + * "error" means the reviewer failed to execute (rate limit, network, etc.) + * and should not count as a code review failure. + */ +export interface ReviewerVerdict { + role: string; + model: string; + verdict: "pass" | "fail" | "error"; +} + +/** + * Full result from a single reviewer: verdict JSON + plain text feedback. + */ +export interface ReviewerResult { + reviewer: ReviewerDefinition; + verdict: ReviewerVerdict; + feedback: string; + raw: string; +} + +/** + * Aggregate result from all reviewers. + */ +export type AggregateVerdict = "pass" | "fail"; + +export interface EnsembleGateResult { + aggregate: AggregateVerdict; + results: ReviewerResult[]; + comment: string; +} + +/** + * Factory function type for creating a runner client for a reviewer. + */ +export type CreateReviewerClient = ( + reviewer: ReviewerDefinition, +) => AgentRunnerCodexClient; + +/** + * Function type for posting a comment to an issue tracker. + */ +export type PostComment = (issueId: string, body: string) => Promise<void>; + +export interface EnsembleGateHandlerOptions { + issue: Issue; + stage: StageDefinition; + createReviewerClient: CreateReviewerClient; + postComment?: PostComment; + workspacePath?: string; + /** Override retry base delay (ms) for testing. Default: 5000. */ + retryBaseDelayMs?: number; +} + +/** + * Run the ensemble gate: spawn N reviewers in parallel, aggregate verdicts. 
+ */ +export async function runEnsembleGate( + options: EnsembleGateHandlerOptions, +): Promise<EnsembleGateResult> { + const { issue, stage, createReviewerClient, postComment, workspacePath } = + options; + const reviewers = stage.reviewers; + + if (reviewers.length === 0) { + return { + aggregate: "pass", + results: [], + comment: "No reviewers configured — auto-passing gate.", + }; + } + + const diff = workspacePath ? getDiff(workspacePath) : null; + const retryBaseDelayMs = + options.retryBaseDelayMs ?? REVIEWER_RETRY_BASE_DELAY_MS; + + const results = await Promise.all( + reviewers.map((reviewer) => + runSingleReviewer( + reviewer, + issue, + createReviewerClient, + diff, + retryBaseDelayMs, + ), + ), + ); + + const aggregate = aggregateVerdicts(results); + const comment = formatGateComment(aggregate, results); + + if (postComment !== undefined) { + try { + await postComment(issue.id, comment); + } catch { + // Comment posting is best-effort — don't fail the gate on it. + } + } + + return { aggregate, results, comment }; +} + +/** + * Aggregate individual verdicts. + * - Any explicit "fail" verdict (from a reviewer that actually ran) = FAIL. + * - If ALL reviewers errored (no pass or fail verdicts), = FAIL (can't skip review). + * - Otherwise (all pass/error with at least one pass) = PASS. + */ +export function aggregateVerdicts(results: ReviewerResult[]): AggregateVerdict { + if (results.length === 0) { + return "pass"; + } + + const hasExplicitFail = results.some((r) => r.verdict.verdict === "fail"); + if (hasExplicitFail) { + return "fail"; + } + + const hasAnyNonError = results.some((r) => r.verdict.verdict !== "error"); + if (!hasAnyNonError) { + // All reviewers errored — can't skip review entirely + return "fail"; + } + + return "pass"; +} + +/** + * Maximum number of retry attempts for transient reviewer errors + * (rate limits, network timeouts, etc.) 
+ */ +export const MAX_REVIEWER_RETRIES = 3; + +/** + * Delay between retry attempts in ms (doubles each attempt). + */ +export const REVIEWER_RETRY_BASE_DELAY_MS = 5_000; + +/** + * Run a single reviewer with retries for transient errors. + * Infrastructure failures (rate limits, network) are retried up to MAX_REVIEWER_RETRIES times. + * If all retries fail, returns an "error" verdict instead of "fail" so it doesn't + * block the gate on infrastructure issues. + */ +async function runSingleReviewer( + reviewer: ReviewerDefinition, + issue: Issue, + createReviewerClient: CreateReviewerClient, + diff: string | null, + retryBaseDelayMs: number = REVIEWER_RETRY_BASE_DELAY_MS, +): Promise<ReviewerResult> { + const prompt = buildReviewerPrompt(reviewer, issue, diff); + const title = `Review: ${issue.identifier} (${reviewer.role})`; + let lastError = ""; + + for (let attempt = 0; attempt <= MAX_REVIEWER_RETRIES; attempt++) { + const client = createReviewerClient(reviewer); + try { + const result: CodexTurnResult = await client.startSession({ + prompt, + title, + }); + const raw = result.message ?? ""; + return parseReviewerOutput(reviewer, raw); + } catch (error) { + lastError = + error instanceof Error ? error.message : "Reviewer process failed"; + // Close client before retry + try { + await client.close(); + } catch { + /* best-effort */ + } + + if (attempt < MAX_REVIEWER_RETRIES) { + const delay = retryBaseDelayMs * 2 ** attempt; + await new Promise((resolve) => setTimeout(resolve, delay)); + } + } finally { + try { + await client.close(); + } catch { + // Best-effort cleanup. + } + } + } + + // All retries exhausted — infrastructure failure, not a code review failure. + return { + reviewer, + verdict: { + role: reviewer.role, + model: reviewer.model ?? "unknown", + verdict: "error", + }, + feedback: `Failed after ${MAX_REVIEWER_RETRIES + 1} attempts. Last error: ${lastError}`, + raw: "", + }; +} + +/** + * Fetch the git diff for the workspace (origin/main...HEAD). 
+ * Returns the diff string, truncated to maxChars. Returns empty string on failure. + */ +const MAX_DIFF_CHARS = 12_000; + +export function getDiff( + workspacePath: string, + maxChars = MAX_DIFF_CHARS, +): string { + try { + const raw = execFileSync("git", ["diff", "origin/main...HEAD"], { + cwd: workspacePath, + encoding: "utf-8", + maxBuffer: 2 * 1024 * 1024, + timeout: 15_000, + }); + if (raw.length <= maxChars) { + return raw; + } + return `${raw.slice(0, maxChars)}\n\n... (diff truncated)`; + } catch { + return ""; + } +} + +/** + * Build the prompt for a reviewer. Includes issue metadata, role context, + * the actual PR diff, and the reviewer's prompt field as inline instructions. + */ +function buildReviewerPrompt( + reviewer: ReviewerDefinition, + issue: Issue, + diff: string | null, +): string { + const lines = [ + `You are a code reviewer with the role: ${reviewer.role}.`, + "", + "## Issue", + `- Identifier: ${issue.identifier}`, + `- Title: ${issue.title}`, + ...(issue.description ? [`- Description: ${issue.description}`] : []), + ...(issue.url ? [`- URL: ${issue.url}`] : []), + ]; + + if (diff && diff.length > 0) { + lines.push("", "## Code Changes (git diff)", "```diff", diff, "```"); + } + + if (reviewer.prompt) { + lines.push("", "## Review Focus", reviewer.prompt); + } + + lines.push( + "", + "## Instructions", + "Review the code changes above for this issue. Respond with TWO sections:", + "", + "1. A JSON verdict line (must be valid JSON on a single line):", + "```", + `{"role": "${reviewer.role}", "model": "${reviewer.model ?? "unknown"}", "verdict": "pass"}`, + "```", + `Set verdict to "pass" if the changes look good, or "fail" if there are issues.`, + "", + "2. Plain text feedback explaining your assessment.", + ); + + return lines.join("\n"); +} + +/** + * Parse reviewer output into verdict JSON + feedback text. + * Expects the output to contain a JSON line with {role, model, verdict} + * followed by plain text feedback. 
+ */ +export function parseReviewerOutput( + reviewer: ReviewerDefinition, + raw: string, +): ReviewerResult { + const defaultVerdict: ReviewerVerdict = { + role: reviewer.role, + model: reviewer.model ?? "unknown", + verdict: "fail", + }; + + if (raw.trim().length === 0) { + return { + reviewer, + verdict: defaultVerdict, + feedback: "Reviewer returned empty output — treating as fail.", + raw, + }; + } + + // Try to find a JSON verdict in the output + const verdictMatch = raw.match( + /\{[^}]*"verdict"\s*:\s*"(?:pass|fail)"[^}]*\}/, + ); + if (verdictMatch === null) { + // Check for rate-limit text before defaulting to "fail" + const lower = raw.toLowerCase(); + const isRateLimited = RATE_LIMIT_PATTERNS.some((p) => lower.includes(p)); + if (isRateLimited) { + return { + reviewer, + verdict: { + role: reviewer.role, + model: reviewer.model ?? "unknown", + verdict: "error", + }, + feedback: raw.trim(), + raw, + }; + } + return { + reviewer, + verdict: defaultVerdict, + feedback: raw.trim(), + raw, + }; + } + + try { + const parsed = JSON.parse(verdictMatch[0]) as Record<string, unknown>; + const verdict: ReviewerVerdict = { + role: typeof parsed.role === "string" ? parsed.role : reviewer.role, + model: + typeof parsed.model === "string" + ? parsed.model + : (reviewer.model ?? "unknown"), + verdict: parsed.verdict === "pass" ? "pass" : "fail", + }; + + // Feedback is everything except the JSON line + const feedback = raw + .replace(verdictMatch[0], "") + .replace(/```/g, "") + .trim(); + + return { + reviewer, + verdict, + feedback: feedback.length > 0 ? feedback : "No additional feedback.", + raw, + }; + } catch { + return { + reviewer, + verdict: defaultVerdict, + feedback: raw.trim(), + raw, + }; + } +} + +/** + * Format a review findings comment for posting to the issue tracker when an + * agent-type stage reports a review failure. Follows the formatGateComment() + * markdown style. 
+ */ +export function formatReviewFindingsComment( + issueIdentifier: string, + stageName: string, + agentMessage: string, +): string { + const sections = [ + "## Review Findings", + "", + `**Stage:** ${stageName}`, + `**Issue:** ${issueIdentifier}`, + ]; + if (agentMessage.trim() !== "") { + sections.push("", agentMessage); + } + return sections.join("\n"); +} + +/** + * Format a rebase-needed comment for posting to the issue tracker when a + * merge-stage agent reports a rebase failure. Follows the + * formatReviewFindingsComment() markdown style. + */ +export function formatRebaseComment( + issueIdentifier: string, + stageName: string, + agentMessage: string, +): string { + const sections = [ + "## Rebase Needed", + "", + `**Stage:** ${stageName}`, + `**Issue:** ${issueIdentifier}`, + ]; + if (agentMessage.trim() !== "") { + sections.push("", agentMessage); + } + return sections.join("\n"); +} + +/** + * Format the aggregate gate result as a markdown comment for Linear. + */ +export function formatGateComment( + aggregate: AggregateVerdict, + results: ReviewerResult[], +): string { + const header = + aggregate === "pass" + ? "## Ensemble Review: PASS" + : "## Ensemble Review: FAIL"; + + const sections = results.map((r) => { + const iconMap = { pass: "PASS", fail: "FAIL", error: "ERROR" } as const; + const icon = iconMap[r.verdict.verdict] ?? "FAIL"; + return [ + `### ${r.verdict.role} (${r.verdict.model}): ${icon}`, + "", + r.feedback, + ].join("\n"); + }); + + return [header, "", ...sections].join("\n"); +} + +/** + * Format an execution report as a markdown comment for Linear. + * Generates a stage timeline table from ExecutionHistory and includes + * total tokens and optional rework count. 
+ */ +export function formatExecutionReport( + issueIdentifier: string, + history: ExecutionHistory, + reworkCount?: number, +): string { + const lines: string[] = [ + "## Execution Report", + "", + `**Issue:** ${issueIdentifier}`, + ]; + + if (reworkCount !== undefined && reworkCount > 0) { + lines.push(`**Rework count:** ${reworkCount}`); + } + + lines.push( + "", + "| Stage | Duration | Tokens | Turns | Outcome |", + "|-------|----------|--------|-------|---------|", + ); + + let totalTokens = 0; + for (const record of history) { + const durationSec = Math.round(record.durationMs / 1000); + totalTokens += record.totalTokens; + lines.push( + `| ${record.stageName} | ${durationSec}s | ${record.totalTokens.toLocaleString("en-US")} | ${record.turns} | ${record.outcome} |`, + ); + } + + lines.push("", `**Total tokens:** ${totalTokens.toLocaleString("en-US")}`); + + lines.push("", "---", `_symphony-ts v${getDisplayVersion()}_`); + + return lines.join("\n"); +} diff --git a/src/orchestrator/pipeline-notifier.ts b/src/orchestrator/pipeline-notifier.ts new file mode 100644 index 00000000..9b9753be --- /dev/null +++ b/src/orchestrator/pipeline-notifier.ts @@ -0,0 +1,259 @@ +/** + * Pipeline notification module. + * + * Best-effort Slack notifications for high-value pipeline events. + * Failures are logged and swallowed — never affect pipeline correctness. 
+ */ + +import type { ExecutionHistory } from "../domain/model.js"; +import { getDisplayVersion } from "../version.js"; + +// --------------------------------------------------------------------------- +// Event types (discriminated union) +// --------------------------------------------------------------------------- + +export interface PipelineStartedEvent { + type: "pipeline_started"; + productName: string; + dashboardUrl: string | null; +} + +export interface PipelineStoppedEvent { + type: "pipeline_stopped"; + productName: string; + completedCount: number; + failedCount: number; + durationMs: number; +} + +export interface IssueCompletedEvent { + type: "issue_completed"; + issueIdentifier: string; + issueTitle: string; + issueUrl: string | null; + executionHistory: ExecutionHistory; + reworkCount: number; + totalTokens: number; + totalDurationMs: number; +} + +export interface IssueFailedEvent { + type: "issue_failed"; + issueIdentifier: string; + issueTitle: string; + issueUrl: string | null; + failureReason: string | null; + retriesExhausted: boolean; + retryAttempt: number | null; +} + +export interface StallKilledEvent { + type: "stall_killed"; + issueIdentifier: string; + issueTitle: string; + stageName: string | null; + stallDurationMs: number; +} + +export interface InfraErrorEvent { + type: "infra_error"; + issueIdentifier: string; + issueTitle: string; + errorReason: string; +} + +export type PipelineNotificationEvent = + | PipelineStartedEvent + | PipelineStoppedEvent + | IssueCompletedEvent + | IssueFailedEvent + | StallKilledEvent + | InfraErrorEvent; + +// --------------------------------------------------------------------------- +// Formatting helpers +// --------------------------------------------------------------------------- + +export function formatDurationMs(ms: number): string { + const totalSeconds = Math.round(ms / 1000); + if (totalSeconds < 60) { + return `${totalSeconds}s`; + } + const minutes = Math.floor(totalSeconds / 60); + 
const seconds = totalSeconds % 60; + if (minutes < 60) { + return seconds > 0 ? `${minutes}m ${seconds}s` : `${minutes}m`; + } + const hours = Math.floor(minutes / 60); + const remainingMinutes = minutes % 60; + return remainingMinutes > 0 ? `${hours}h ${remainingMinutes}m` : `${hours}h`; +} + +export function formatStageTimeline(history: ExecutionHistory): string { + if (history.length === 0) { + return "_No stage data_"; + } + + return history + .map( + (record) => + `${record.stageName}: ${formatDurationMs(record.durationMs)} · ${record.totalTokens.toLocaleString("en-US")} tokens · ${record.outcome}`, + ) + .join("\n"); +} + +// --------------------------------------------------------------------------- +// Message formatter +// --------------------------------------------------------------------------- + +export function formatNotification(event: PipelineNotificationEvent): string { + const version = `_symphony-ts v${getDisplayVersion()}_`; + + switch (event.type) { + case "pipeline_started": { + const parts = [`:rocket: *Pipeline started* — ${event.productName}`]; + if (event.dashboardUrl !== null) { + parts.push(`Dashboard: ${event.dashboardUrl}`); + } + parts.push(version); + return parts.join("\n"); + } + + case "pipeline_stopped": { + const total = event.completedCount + event.failedCount; + return [ + `:stop_sign: *Pipeline stopped* — ${event.productName}`, + `Completed: ${event.completedCount} · Failed: ${event.failedCount} · Total: ${total}`, + `Duration: ${formatDurationMs(event.durationMs)}`, + version, + ].join("\n"); + } + + case "issue_completed": { + const parts = [ + `:white_check_mark: *Issue completed* — ${event.issueIdentifier}`, + `*${event.issueTitle}*`, + ]; + if (event.issueUrl !== null) { + parts.push(event.issueUrl); + } + if (event.executionHistory.length > 0) { + parts.push("", formatStageTimeline(event.executionHistory)); + } + parts.push( + "", + `Total: ${formatDurationMs(event.totalDurationMs)} · 
${event.totalTokens.toLocaleString("en-US")} tokens`, + ); + if (event.reworkCount > 0) { + parts.push(`Rework cycles: ${event.reworkCount}`); + } + parts.push(version); + return parts.join("\n"); + } + + case "issue_failed": { + const parts = [ + `:x: *Issue failed* — ${event.issueIdentifier}`, + `*${event.issueTitle}*`, + ]; + if (event.issueUrl !== null) { + parts.push(event.issueUrl); + } + if (event.failureReason !== null) { + parts.push(`Reason: ${event.failureReason}`); + } + if (event.retriesExhausted) { + parts.push(`Retries exhausted (attempt ${event.retryAttempt ?? "?"})`); + } + parts.push(version); + return parts.join("\n"); + } + + case "stall_killed": { + const parts = [ + `:warning: *Stall killed* — ${event.issueIdentifier}`, + `*${event.issueTitle}*`, + ]; + if (event.stageName !== null) { + parts.push(`Stage: ${event.stageName}`); + } + parts.push(`Stalled for: ${formatDurationMs(event.stallDurationMs)}`); + parts.push(version); + return parts.join("\n"); + } + + case "infra_error": { + return [ + `:rotating_light: *Infra error* — ${event.issueIdentifier}`, + `*${event.issueTitle}*`, + `Error: ${event.errorReason}`, + version, + ].join("\n"); + } + } +} + +// --------------------------------------------------------------------------- +// Poster interface & Slack factory +// --------------------------------------------------------------------------- + +export interface NotificationPoster { + post(channel: string, text: string): Promise<void>; +} + +export function createSlackPoster(input: { + botToken: string; +}): NotificationPoster { + // Lazy-import to avoid pulling @slack/web-api into test bundles + // when using mock posters. 
+ let clientPromise: Promise<import("@slack/web-api").WebClient> | null = null; + + const getClient = () => { + if (clientPromise === null) { + clientPromise = import("@slack/web-api").then( + ({ WebClient }) => new WebClient(input.botToken), + ); + } + return clientPromise; + }; + + return { + async post(channel: string, text: string): Promise<void> { + const client = await getClient(); + await client.chat.postMessage({ channel, text }); + }, + }; +} + +// --------------------------------------------------------------------------- +// PipelineNotifier — best-effort delivery +// --------------------------------------------------------------------------- + +export interface PipelineNotificationSink { + notify(event: PipelineNotificationEvent): void; +} + +export interface PipelineNotifierOptions { + channel: string; + poster: NotificationPoster; + onError?: (error: unknown) => void; +} + +export class PipelineNotifier implements PipelineNotificationSink { + private readonly channel: string; + private readonly poster: NotificationPoster; + private readonly onError: (error: unknown) => void; + + constructor(options: PipelineNotifierOptions) { + this.channel = options.channel; + this.poster = options.poster; + this.onError = options.onError ?? 
(() => {}); + } + + notify(event: PipelineNotificationEvent): void { + const text = formatNotification(event); + void this.poster.post(this.channel, text).catch((error) => { + this.onError(error); + }); + } +} diff --git a/src/orchestrator/runtime-host.ts b/src/orchestrator/runtime-host.ts index fdc080b4..c979ee9b 100644 --- a/src/orchestrator/runtime-host.ts +++ b/src/orchestrator/runtime-host.ts @@ -3,13 +3,26 @@ import { access, mkdir } from "node:fs/promises"; import { join } from "node:path"; import type { Writable } from "node:stream"; -import type { AgentRunResult, AgentRunnerEvent } from "../agent/runner.js"; +import type { + AgentRunInput, + AgentRunResult, + AgentRunnerEvent, +} from "../agent/runner.js"; import { AgentRunner } from "../agent/runner.js"; import { validateDispatchConfig } from "../config/config-resolver.js"; -import type { ResolvedWorkflowConfig } from "../config/types.js"; +import type { + ResolvedWorkflowConfig, + StageDefinition, +} from "../config/types.js"; import { WorkflowWatcher } from "../config/workflow-watch.js"; -import type { Issue, RetryEntry, RunningEntry } from "../domain/model.js"; +import type { + ExecutionHistory, + Issue, + RetryEntry, + RunningEntry, +} from "../domain/model.js"; import { ERROR_CODES } from "../errors/codes.js"; +import { formatEasternTimestamp } from "../logging/format-timestamp.js"; import { type RuntimeSnapshot, buildRuntimeSnapshot, @@ -25,8 +38,11 @@ import { type RefreshResponse, startDashboardServer, } from "../observability/dashboard-server.js"; +import { createRunnerFromConfig, isAiSdkRunner } from "../runners/factory.js"; +import type { RunnerKind } from "../runners/types.js"; import { LinearTrackerClient } from "../tracker/linear-client.js"; import type { IssueTracker } from "../tracker/tracker.js"; +import { getDisplayVersion } from "../version.js"; import { WorkspaceHookRunner } from "../workspace/hooks.js"; import { WorkspaceManager } from "../workspace/workspace-manager.js"; import type 
{ @@ -35,13 +51,11 @@ import type { TimerScheduler, } from "./core.js"; import { OrchestratorCore } from "./core.js"; +import { runEnsembleGate } from "./gate-handler.js"; +import type { PipelineNotificationSink } from "./pipeline-notifier.js"; export interface AgentRunnerLike { - run(input: { - issue: Issue; - attempt: number | null; - signal?: AbortSignal; - }): Promise<AgentRunResult>; + run(input: AgentRunInput): Promise<AgentRunResult>; } export interface RuntimeHostOptions { @@ -53,6 +67,7 @@ export interface RuntimeHostOptions { }) => AgentRunnerLike; logger?: StructuredLogger; workspaceManager?: WorkspaceManager; + notifier?: PipelineNotificationSink | null; now?: () => Date; } @@ -63,9 +78,11 @@ export interface RuntimeServiceOptions { runtimeHost?: OrchestratorRuntimeHost; workspaceManager?: WorkspaceManager; workflowWatcher?: WorkflowWatcher | null; + notifier?: PipelineNotificationSink | null; now?: () => Date; logger?: StructuredLogger; stdout?: Writable; + shutdownTimeoutMs?: number; } export interface RuntimeServiceHandle { @@ -79,12 +96,16 @@ export interface RuntimeServiceHandle { interface WorkerExecution { issueId: string; issueIdentifier: string; + stageName: string | null; controller: AbortController; completion: Promise<void>; stopRequest: StopRequest | null; lastResult: AgentRunResult | null; } +/** Maximum ms to wait for idle workers during shutdown before forcing exit. */ +const SHUTDOWN_IDLE_TIMEOUT_MS = 30_000; + export class RuntimeHostStartupError extends Error { readonly code: string; @@ -122,9 +143,12 @@ export class OrchestratorRuntimeHost implements DashboardServerHost { private readonly snapshotListeners = new Set<() => void>(); + readonly notifier: PipelineNotificationSink | null; + constructor(options: RuntimeHostOptions) { this.config = options.config; this.tracker = options.tracker; + this.notifier = options.notifier ?? null; this.now = options.now ?? (() => new Date()); this.logger = options.logger ?? 
null; this.workspaceManager = @@ -166,8 +190,48 @@ export class OrchestratorRuntimeHost implements DashboardServerHost { tracker: options.tracker, now: this.now, timerScheduler, - spawnWorker: async ({ issue, attempt }) => - this.spawnWorkerExecution(issue, attempt), + ...(this.tracker instanceof LinearTrackerClient + ? { + postComment: async (issueId: string, body: string) => { + await (this.tracker as LinearTrackerClient).postComment( + issueId, + body, + ); + }, + updateIssueState: async ( + issueId: string, + issueIdentifier: string, + stateName: string, + ) => { + const teamKey = issueIdentifier.split("-")[0] ?? issueIdentifier; + await (this.tracker as LinearTrackerClient).updateIssueState( + issueId, + stateName, + teamKey, + ); + }, + autoCloseParentIssue: async ( + issueId: string, + issueIdentifier: string, + ) => { + const teamKey = issueIdentifier.split("-")[0] ?? issueIdentifier; + const terminalStates = options.config.tracker.terminalStates; + await (this.tracker as LinearTrackerClient).checkAndCloseParent( + issueId, + terminalStates, + teamKey, + ); + }, + } + : {}), + spawnWorker: async ({ issue, attempt, stage, stageName, reworkCount }) => + this.spawnWorkerExecution( + issue, + attempt, + stage, + stageName, + reworkCount, + ), stopRunningIssue: async (input) => { await this.stopWorkerExecution(input.issueId, { issueId: input.issueId, @@ -176,6 +240,40 @@ export class OrchestratorRuntimeHost implements DashboardServerHost { reason: input.reason, }); }, + runEnsembleGate: async ({ issue, stage }) => { + const workspaceInfo = this.workspaceManager.resolveForIssue(issue.id); + const gateOptions = { + issue, + stage, + workspacePath: workspaceInfo.workspacePath, + createReviewerClient: ( + reviewer: import("../config/types.js").ReviewerDefinition, + ) => { + const kind = (reviewer.runner ?? 
+ options.config.runner.kind) as RunnerKind; + if (!isAiSdkRunner(kind)) { + throw new Error( + `Reviewer runner kind "${kind}" is not an AI SDK runner — only claude-code and gemini are supported for ensemble review.`, + ); + } + return createRunnerFromConfig({ + config: { kind, model: reviewer.model }, + cwd: workspaceInfo.workspacePath, + onEvent: () => {}, + }); + }, + }; + if (this.tracker instanceof LinearTrackerClient) { + const tracker = this.tracker; + return runEnsembleGate({ + ...gateOptions, + postComment: async (issueId: string, body: string) => { + await tracker.postComment(issueId, body); + }, + }); + } + return runEnsembleGate(gateOptions); + }, }; this.orchestrator = new OrchestratorCore(orchestratorOptions); @@ -272,7 +370,7 @@ export class OrchestratorRuntimeHost implements DashboardServerHost { } async requestRefresh(): Promise<RefreshResponse> { - const requestedAt = this.now().toISOString(); + const requestedAt = formatEasternTimestamp(this.now()); const coalesced = this.refreshQueued; this.refreshQueued = true; @@ -298,9 +396,20 @@ export class OrchestratorRuntimeHost implements DashboardServerHost { }; } + abortAllWorkers(): number { + const count = this.workers.size; + for (const worker of this.workers.values()) { + worker.controller.abort("Shutdown: aborting running workers."); + } + return count; + } + private async spawnWorkerExecution( issue: Issue, attempt: number | null, + stage: StageDefinition | null = null, + stageName: string | null = null, + reworkCount = 0, ): Promise<{ workerHandle: WorkerExecution; monitorHandle: Promise<void>; @@ -311,23 +420,39 @@ export class OrchestratorRuntimeHost implements DashboardServerHost { issue_identifier: issue.identifier, attempt, state: issue.state, + ...(stageName !== null ? 
{ stage: stageName } : {}), }); const controller = new AbortController(); const execution: WorkerExecution = { issueId: issue.id, issueIdentifier: issue.identifier, + stageName, controller, stopRequest: null, lastResult: null, completion: Promise.resolve(), }; + await this.logger?.info( + "agent_runner_starting", + "Agent runner starting for issue.", + { + outcome: "started", + issue_id: issue.id, + issue_identifier: issue.identifier, + ...(stageName !== null ? { stage: stageName } : {}), + }, + ); + const completion = this.agentRunner .run({ issue, attempt, signal: controller.signal, + stage, + stageName, + reworkCount, }) .then(async (result) => { execution.lastResult = result; @@ -339,6 +464,12 @@ export class OrchestratorRuntimeHost implements DashboardServerHost { }); }) .catch(async (error) => { + await this.logger?.error("agent_runner_error", toErrorMessage(error), { + outcome: "failed", + issue_id: issue.id, + issue_identifier: issue.identifier, + ...(stageName !== null ? { stage: stageName } : {}), + }); await this.enqueue(async () => { await this.finalizeWorkerExecution(execution, { outcome: "abnormal", @@ -399,16 +530,217 @@ export class OrchestratorRuntimeHost implements DashboardServerHost { }, ); + // Pre-capture data that advanceStage() deletes during onWorkerExit() + const state = this.orchestrator.getState(); + const runningEntry = state.running[execution.issueId]; + + // Compute durationMs using runAttempt.startedAt if available (normal completion case), + // falling back to runningEntry.startedAt for abnormal cases (stall timeout where runAttempt is null). + const durationMs = execution.lastResult?.runAttempt?.startedAt + ? this.now().getTime() - + new Date(execution.lastResult.runAttempt.startedAt).getTime() + : runningEntry?.startedAt + ? 
this.now().getTime() - new Date(runningEntry.startedAt).getTime() + : 0; + + const liveSession = execution.lastResult?.liveSession; + await this.logger?.log("info", "stage_completed", "Stage completed.", { + issue_id: execution.issueId, + issue_identifier: execution.issueIdentifier, + session_id: liveSession?.sessionId ?? null, + stage_name: execution.stageName, + input_tokens: liveSession?.codexInputTokens ?? 0, + output_tokens: liveSession?.codexOutputTokens ?? 0, + total_tokens: liveSession?.codexTotalTokens ?? 0, + ...(liveSession?.codexCacheReadTokens + ? { cache_read_tokens: liveSession.codexCacheReadTokens } + : {}), + ...(liveSession?.codexCacheWriteTokens + ? { cache_write_tokens: liveSession.codexCacheWriteTokens } + : {}), + ...(liveSession?.codexNoCacheTokens + ? { no_cache_tokens: liveSession.codexNoCacheTokens } + : {}), + ...(liveSession?.codexReasoningTokens + ? { reasoning_tokens: liveSession.codexReasoningTokens } + : {}), + turns_used: liveSession?.turnCount ?? 0, + total_input_tokens: liveSession?.totalStageInputTokens ?? 0, + total_output_tokens: liveSession?.totalStageOutputTokens ?? 0, + total_total_tokens: liveSession?.totalStageTotalTokens ?? 0, + ...(liveSession?.totalStageCacheReadTokens + ? { total_cache_read_tokens: liveSession.totalStageCacheReadTokens } + : {}), + ...(liveSession?.totalStageCacheWriteTokens + ? { total_cache_write_tokens: liveSession.totalStageCacheWriteTokens } + : {}), + turn_count: liveSession?.turnCount ?? 0, + duration_ms: durationMs, + outcome: input.outcome === "normal" ? "completed" : "failed", + }); + if (execution.stopRequest?.cleanupWorkspace === true) { await this.workspaceManager.removeForIssue(execution.issueId); } + const lastTurnMessage = execution.lastResult?.lastTurn?.message; + const fallbackMessage = execution.lastResult?.liveSession?.lastCodexMessage; + const agentMessage = + (lastTurnMessage !== null && + lastTurnMessage !== undefined && + lastTurnMessage !== "" + ? 
lastTurnMessage + : fallbackMessage !== null && + fallbackMessage !== undefined && + fallbackMessage !== "" + ? fallbackMessage + : undefined) ?? undefined; + + // Capture remaining state data + const preHistory: ExecutionHistory = [ + ...(state.issueExecutionHistory[execution.issueId] ?? []), + ]; + const preReworkCount = state.issueReworkCounts[execution.issueId] ?? 0; + const capturedTitle = + runningEntry?.issue.title ?? execution.issueIdentifier; + const capturedUrl = runningEntry?.issue.url ?? null; + const capturedRetryAttempt = runningEntry?.retryAttempt ?? null; + const preFailedHas = state.failed.has(execution.issueId); + const capturedFirstDispatchedAt = + state.issueFirstDispatchedAt[execution.issueId] ?? null; + this.orchestrator.onWorkerExit({ issueId: execution.issueId, outcome: input.outcome, ...(input.reason === undefined ? {} : { reason: input.reason }), endedAt: input.endedAt ?? this.now(), + ...(agentMessage === undefined || agentMessage === null + ? {} + : { agentMessage }), }); + + // Use the history snapshot captured inside onWorkerExit (after the stage + // record push but before advanceStage deletes issueExecutionHistory for + // terminal transitions). Fall back to the state map for non-terminal cases, + // then to preHistory as a last resort. + const postHistory: ExecutionHistory = [ + ...(this.orchestrator.consumeExitHistorySnapshot(execution.issueId) ?? + this.orchestrator.getState().issueExecutionHistory[execution.issueId] ?? + preHistory), + ]; + + // Fire notifications after state update + if (this.notifier !== null) { + this.fireWorkerNotification(execution, input, { + preHistory: postHistory, + preReworkCount, + capturedTitle, + capturedUrl, + capturedRetryAttempt, + capturedTurnCount: runningEntry?.turnCount ?? 
0, + preFailedHas, + capturedFirstDispatchedAt, + durationMs, + }); + } + } + + private fireWorkerNotification( + execution: WorkerExecution, + input: { + outcome: "normal" | "abnormal"; + reason?: string; + }, + captured: { + preHistory: ExecutionHistory; + preReworkCount: number; + capturedTitle: string; + capturedUrl: string | null; + capturedRetryAttempt: number | null; + capturedTurnCount: number; + preFailedHas: boolean; + capturedFirstDispatchedAt: string | null; + durationMs: number; + }, + ): void { + // biome-ignore lint/style/noNonNullAssertion: caller guards notifier !== null + const notifier = this.notifier!; + const state = this.orchestrator.getState(); + + // Terminal failure — retries exhausted (check first; supersedes infra_error) + const nowFailed = + state.failed.has(execution.issueId) && !captured.preFailedHas; + if (nowFailed) { + const maxRetries = this.config.agent.maxRetryAttempts; + const retriesExhausted = + (captured.capturedRetryAttempt ?? 0) >= maxRetries; + notifier.notify({ + type: "issue_failed", + issueIdentifier: execution.issueIdentifier, + issueTitle: captured.capturedTitle, + issueUrl: captured.capturedUrl, + failureReason: input.reason ?? null, + retriesExhausted, + retryAttempt: captured.capturedRetryAttempt, + }); + return; + } + + // Stall killed — immediate notification regardless of retry + if ( + input.outcome === "abnormal" && + execution.stopRequest?.reason === "stall_timeout" + ) { + notifier.notify({ + type: "stall_killed", + issueIdentifier: execution.issueIdentifier, + issueTitle: captured.capturedTitle, + stageName: execution.stageName, + stallDurationMs: captured.durationMs, + }); + return; + } + + // Infra error — abnormal exit with 0 turns (agent never started) + if (input.outcome === "abnormal" && captured.capturedTurnCount === 0) { + notifier.notify({ + type: "infra_error", + issueIdentifier: execution.issueIdentifier, + issueTitle: captured.capturedTitle, + errorReason: input.reason ?? 
"unknown error", + }); + return; + } + + // Terminal completion: issue is in completed set AND no continuation retry was scheduled + // (completed is only added for terminal completions; hasContinuationRetry kept as defense-in-depth) + const isInCompleted = state.completed.has(execution.issueId); + const hasContinuationRetry = + state.retryAttempts[execution.issueId] !== undefined; + const isNewlyFailed = + state.failed.has(execution.issueId) && !captured.preFailedHas; + + if (isInCompleted && !hasContinuationRetry && !isNewlyFailed) { + const totalTokens = captured.preHistory.reduce( + (sum, r) => sum + r.totalTokens, + 0, + ); + const totalDurationMs = + captured.capturedFirstDispatchedAt !== null + ? this.now().getTime() - + Date.parse(captured.capturedFirstDispatchedAt) + : captured.durationMs; + notifier.notify({ + type: "issue_completed", + issueIdentifier: execution.issueIdentifier, + issueTitle: captured.capturedTitle, + issueUrl: captured.capturedUrl, + executionHistory: captured.preHistory, + reworkCount: captured.preReworkCount, + totalTokens, + totalDurationMs, + }); + } } private enqueue<T>(task: () => Promise<T> | T): Promise<T> { @@ -468,6 +800,7 @@ export async function startRuntimeService( let workspaceManager = options.workspaceManager ?? createWorkspaceManagerFromConfig(currentConfig, logger); + const notifier = options.notifier ?? null; const runtimeHost = options.runtimeHost ?? new OrchestratorRuntimeHost({ @@ -475,10 +808,12 @@ export async function startRuntimeService( tracker, logger, workspaceManager, + notifier, ...(options.now === undefined ? 
{} : { now: options.now }), }); const usesManagedTracker = options.tracker === undefined; const usesManagedWorkspaceManager = options.workspaceManager === undefined; + const startupTimestamp = Date.now(); await cleanupTerminalIssueWorkspaces({ tracker, @@ -502,6 +837,7 @@ export async function startRuntimeService( const exitPromise = createExitPromise(); let pollTimer: NodeJS.Timeout | null = null; let shuttingDown = false; + let pendingExitCode = 0; const scheduleNextPoll = () => { if (stopController.signal.aborted) { @@ -515,14 +851,16 @@ export async function startRuntimeService( const runPollCycle = async () => { try { + const pollStart = Date.now(); const result = await runtimeHost.pollOnce(); - await logPollCycleResult(logger, result); + const durationMs = Date.now() - pollStart; + await logPollCycleResult(logger, result, durationMs); scheduleNextPoll(); } catch (error) { await logger.error("runtime_poll_failed", toErrorMessage(error), { error_code: ERROR_CODES.cliStartupFailed, }); - resolveExit(exitPromise, 1); + pendingExitCode = 1; void shutdown(); } }; @@ -531,7 +869,6 @@ export async function startRuntimeService( void logger.info("runtime_shutdown_signal", `received ${signal}`, { reason: signal, }); - resolveExit(exitPromise, 0); void shutdown(); }; @@ -603,13 +940,15 @@ export async function startRuntimeService( : options.workflowWatcher; workflowWatcher?.start(); + const shutdownTimeoutMs = + options.shutdownTimeoutMs ?? 
SHUTDOWN_IDLE_TIMEOUT_MS; + const shutdown = async () => { if (shuttingDown) { await exitPromise.closed; return; } shuttingDown = true; - resolveExit(exitPromise, 0); stopController.abort(); if (pollTimer !== null) { @@ -619,21 +958,66 @@ export async function startRuntimeService( removeSignalHandlers(); + const shutdownStart = Date.now(); + const workersAborted = runtimeHost.abortAllWorkers(); + + let timedOut = false; + const idleOrTimeout = new Promise<void>((resolve) => { + const timer = setTimeout(() => { + timedOut = true; + void logger.warn( + "shutdown_idle_timeout", + "Timed out waiting for workers to become idle; proceeding with exit.", + { timeout_ms: shutdownTimeoutMs }, + ); + resolve(); + }, shutdownTimeoutMs); + void runtimeHost.waitForIdle().then(() => { + clearTimeout(timer); + resolve(); + }); + }); + await Promise.allSettled([ - runtimeHost.waitForIdle(), + idleOrTimeout, dashboard?.close() ?? Promise.resolve(), workflowWatcher?.close() ?? Promise.resolve(), ]); + await logger.info("shutdown_complete", "Shutdown complete.", { + workers_aborted: workersAborted, + timed_out: timedOut, + duration_ms: Date.now() - shutdownStart, + }); + + const runtimeState = runtimeHost.getState(); + runtimeHost.notifier?.notify({ + type: "pipeline_stopped", + productName, + completedCount: runtimeState.completed.size, + failedCount: runtimeState.failed.size, + durationMs: Date.now() - startupTimestamp, + }); + + resolveExit(exitPromise, pendingExitCode); resolveClosed(exitPromise); }; await logger.info("runtime_starting", "Symphony runtime started.", { + symphony_version: getDisplayVersion(), poll_interval_ms: currentConfig.polling.intervalMs, max_concurrent_agents: currentConfig.agent.maxConcurrentAgents, ...(dashboard === null ? {} : { port: dashboard.port }), }); + const productName = extractProductName(currentConfig.workflowPath); + runtimeHost.notifier?.notify({ + type: "pipeline_started", + productName, + dashboardUrl: + dashboard !== null ? 
`http://localhost:${dashboard.port}` : null, + }); + void runPollCycle(); return { @@ -650,6 +1034,7 @@ export async function startRuntimeService( async function logPollCycleResult( logger: StructuredLogger, result: Awaited<ReturnType<OrchestratorRuntimeHost["pollOnce"]>>, + durationMs: number, ): Promise<void> { if (!result.validation.ok) { await logger.error( @@ -682,6 +1067,13 @@ async function logPollCycleResult( }, ); } + + await logger.info("poll_tick_completed", "Poll tick completed.", { + dispatched_count: result.dispatchedIssueIds.length, + running_count: result.runningCount, + reconciled_stop_requests: result.stopRequests.length, + duration_ms: durationMs, + }); } async function createRuntimeWorkflowWatcher(input: { @@ -908,14 +1300,33 @@ async function logAgentEvent( session_id: event.sessionId ?? null, thread_id: event.threadId ?? null, turn_id: event.turnId ?? null, + turn_number: event.turnCount, attempt: event.attempt, workspace_path: event.workspacePath, + ...(event.promptChars !== undefined + ? { prompt_chars: event.promptChars } + : {}), + ...(event.estimatedPromptTokens !== undefined + ? { estimated_prompt_tokens: event.estimatedPromptTokens } + : {}), ...(event.usage === undefined ? {} : { input_tokens: event.usage.inputTokens, output_tokens: event.usage.outputTokens, total_tokens: event.usage.totalTokens, + ...(event.usage.cacheReadTokens !== undefined + ? { cache_read_tokens: event.usage.cacheReadTokens } + : {}), + ...(event.usage.cacheWriteTokens !== undefined + ? { cache_write_tokens: event.usage.cacheWriteTokens } + : {}), + ...(event.usage.noCacheTokens !== undefined + ? { no_cache_tokens: event.usage.noCacheTokens } + : {}), + ...(event.usage.reasoningTokens !== undefined + ? 
{ reasoning_tokens: event.usage.reasoningTokens } + : {}), }), }); } @@ -994,7 +1405,7 @@ function toRetryIssueDetail( running: null, retry: { attempt: retry.attempt, - due_at: new Date(retry.dueAtMs).toISOString(), + due_at: formatEasternTimestamp(new Date(retry.dueAtMs)), error: retry.error, }, logs: { @@ -1093,3 +1504,15 @@ function supportsConfigUpdate( } { return "updateConfig" in value && typeof value.updateConfig === "function"; } + +/** + * Extract a human-readable product name from a WORKFLOW file path. + * E.g., "/path/to/WORKFLOW-symphony.md" → "symphony" + * "/path/to/WORKFLOW.md" → "WORKFLOW" + */ +export function extractProductName(workflowPath: string): string { + const filename = workflowPath.split("/").pop() ?? workflowPath; + const base = filename.replace(/\.md$/i, ""); + const match = /^WORKFLOW-(.+)$/i.exec(base); + return match !== null ? (match[1] ?? base) : base; +} diff --git a/src/reactions.ts b/src/reactions.ts new file mode 100644 index 00000000..a8eb7233 --- /dev/null +++ b/src/reactions.ts @@ -0,0 +1,49 @@ +/** + * Reaction lifecycle helpers for Slack message processing. + * + * Manages the emoji reaction indicators that show message processing state: + * - eyes: processing in progress + * - white_check_mark: completed successfully + * - x: completed with error + * - warning: configuration issue (e.g., unmapped channel) + */ +import type { webApi } from "@slack/bolt"; + +/** Mark a message as being processed (add eyes reaction). */ +export async function markProcessing( + client: webApi.WebClient, + channel: string, + timestamp: string, +): Promise<void> { + await client.reactions.add({ channel, timestamp, name: "eyes" }); +} + +/** Mark a message as successfully completed (replace eyes with checkmark). 
*/ +export async function markSuccess( + client: webApi.WebClient, + channel: string, + timestamp: string, +): Promise<void> { + await client.reactions.remove({ channel, timestamp, name: "eyes" }); + await client.reactions.add({ channel, timestamp, name: "white_check_mark" }); +} + +/** Mark a message as failed (replace eyes with x). */ +export async function markError( + client: webApi.WebClient, + channel: string, + timestamp: string, +): Promise<void> { + await client.reactions.remove({ channel, timestamp, name: "eyes" }); + await client.reactions.add({ channel, timestamp, name: "x" }); +} + +/** Mark a message as having a configuration warning (replace eyes with warning). */ +export async function markWarning( + client: webApi.WebClient, + channel: string, + timestamp: string, +): Promise<void> { + await client.reactions.remove({ channel, timestamp, name: "eyes" }); + await client.reactions.add({ channel, timestamp, name: "warning" }); +} diff --git a/src/runners/claude-code-runner.ts b/src/runners/claude-code-runner.ts new file mode 100644 index 00000000..9192da6d --- /dev/null +++ b/src/runners/claude-code-runner.ts @@ -0,0 +1,223 @@ +import { statSync } from "node:fs"; +import { join } from "node:path"; +import { generateText } from "ai"; +import { claudeCode } from "ai-sdk-provider-claude-code"; + +import type { AgentRunnerCodexClient } from "../agent/runner.js"; +import type { + CodexClientEvent, + CodexTurnResult, + CodexUsage, +} from "../codex/app-server-client.js"; +import { formatEasternTimestamp } from "../logging/format-timestamp.js"; + +// ai-sdk-provider-claude-code uses short model names, not full Anthropic IDs. +// Map standard names to provider-expected short names. 
+const MODEL_ID_MAP: Record<string, string> = { + "claude-opus-4": "opus", + "claude-opus-4-6": "opus", + "claude-sonnet-4": "sonnet", + "claude-sonnet-4-5": "sonnet", + "claude-haiku-4": "haiku", + "claude-haiku-4-5": "haiku", +}; + +export function resolveClaudeModelId(model: string): string { + return MODEL_ID_MAP[model] ?? model; +} + +export interface ClaudeCodeRunnerOptions { + cwd: string; + model: string; + onEvent?: (event: CodexClientEvent) => void; + /** Interval in ms for workspace file-change heartbeat polling. Defaults to 5000. Set to 0 to disable. */ + heartbeatIntervalMs?: number; +} + +export class ClaudeCodeRunner implements AgentRunnerCodexClient { + private readonly options: ClaudeCodeRunnerOptions; + private sessionId: string; + private turnCount = 0; + private closed = false; + // AbortController for the in-flight generateText call. + // claude-code provider keeps a subprocess alive — aborting ensures cleanup. + private activeTurnController: AbortController | null = null; + + constructor(options: ClaudeCodeRunnerOptions) { + this.options = options; + this.sessionId = `claude-${Date.now()}`; + } + + async startSession(input: { + prompt: string; + title: string; + }): Promise<CodexTurnResult> { + return this.executeTurn(input.prompt, input.title); + } + + async continueTurn(prompt: string, title: string): Promise<CodexTurnResult> { + return this.executeTurn(prompt, title); + } + + async close(): Promise<void> { + this.closed = true; + // Abort any in-flight turn so the claude-code subprocess is killed + this.activeTurnController?.abort(); + this.activeTurnController = null; + } + + private async executeTurn( + prompt: string, + _title: string, + ): Promise<CodexTurnResult> { + this.turnCount += 1; + const turnId = `turn-${this.turnCount}`; + const threadId = this.sessionId; + const fullSessionId = `${threadId}-${turnId}`; + + this.emit({ + event: "session_started", + sessionId: fullSessionId, + threadId, + turnId, + }); + + const controller = 
new AbortController(); + this.activeTurnController = controller; + + const heartbeatMs = this.options.heartbeatIntervalMs ?? 5000; + let heartbeatTimer: ReturnType<typeof setInterval> | null = null; + + try { + // Start workspace file-change heartbeat polling. + // Watch both .git/index (implementation stages) and the workspace root + // directory (review stages that never touch git but do read/write files). + if (heartbeatMs > 0) { + const gitIndexPath = join(this.options.cwd, ".git", "index"); + const workspacePath = this.options.cwd; + let lastGitMtimeMs = getMtimeMs(gitIndexPath); + let lastWorkspaceMtimeMs = getMtimeMs(workspacePath); + heartbeatTimer = setInterval(() => { + const currentGitMtimeMs = getMtimeMs(gitIndexPath); + const currentWorkspaceMtimeMs = getMtimeMs(workspacePath); + const gitChanged = currentGitMtimeMs !== lastGitMtimeMs; + const workspaceChanged = + currentWorkspaceMtimeMs !== lastWorkspaceMtimeMs; + if (gitChanged || workspaceChanged) { + lastGitMtimeMs = currentGitMtimeMs; + lastWorkspaceMtimeMs = currentWorkspaceMtimeMs; + const source = + gitChanged && workspaceChanged + ? "git index and workspace dir" + : gitChanged + ? "git index" + : "workspace dir"; + this.emit({ + event: "activity_heartbeat", + sessionId: fullSessionId, + threadId, + turnId, + message: `workspace file change detected (${source})`, + }); + } + }, heartbeatMs); + } + + const resolvedModel = resolveClaudeModelId(this.options.model); + const result = await generateText({ + model: claudeCode(resolvedModel, { + cwd: this.options.cwd, + permissionMode: "bypassPermissions", + }), + prompt, + abortSignal: controller.signal, + }); + + const usage: CodexUsage = { + inputTokens: result.usage.inputTokens ?? 0, + outputTokens: result.usage.outputTokens ?? 0, + totalTokens: result.usage.totalTokens ?? 0, + ...(result.usage.inputTokenDetails?.cacheReadTokens !== undefined + ? 
{ cacheReadTokens: result.usage.inputTokenDetails.cacheReadTokens } + : {}), + ...(result.usage.inputTokenDetails?.cacheWriteTokens !== undefined + ? { + cacheWriteTokens: result.usage.inputTokenDetails.cacheWriteTokens, + } + : {}), + ...(result.usage.inputTokenDetails?.noCacheTokens !== undefined + ? { noCacheTokens: result.usage.inputTokenDetails.noCacheTokens } + : {}), + ...(result.usage.outputTokenDetails?.reasoningTokens !== undefined + ? { reasoningTokens: result.usage.outputTokenDetails.reasoningTokens } + : {}), + }; + + this.emit({ + event: "turn_completed", + sessionId: fullSessionId, + threadId, + turnId, + usage, + message: result.text, + }); + + return { + status: "completed", + threadId, + turnId, + sessionId: fullSessionId, + usage, + rateLimits: null, + message: result.text, + }; + } catch (error) { + const message = + error instanceof Error ? error.message : "Claude Code turn failed"; + + this.emit({ + event: "turn_failed", + sessionId: fullSessionId, + threadId, + turnId, + message, + }); + + return { + status: "failed", + threadId, + turnId, + sessionId: fullSessionId, + usage: null, + rateLimits: null, + message, + }; + } finally { + if (heartbeatTimer !== null) { + clearInterval(heartbeatTimer); + } + // Clear the controller ref so close() doesn't abort a completed turn + if (this.activeTurnController === controller) { + this.activeTurnController = null; + } + } + } + + private emit( + input: Omit<CodexClientEvent, "timestamp" | "codexAppServerPid">, + ): void { + this.options.onEvent?.({ + ...input, + timestamp: formatEasternTimestamp(new Date()), + codexAppServerPid: null, + }); + } +} + +function getMtimeMs(filePath: string): number { + try { + return statSync(filePath).mtimeMs; + } catch { + return 0; + } +} diff --git a/src/runners/factory.ts b/src/runners/factory.ts new file mode 100644 index 00000000..3ade94e2 --- /dev/null +++ b/src/runners/factory.ts @@ -0,0 +1,42 @@ +import type { AgentRunnerCodexClient } from "../agent/runner.js"; 
+import { ClaudeCodeRunner } from "./claude-code-runner.js"; +import { GeminiRunner } from "./gemini-runner.js"; +import type { RunnerFactoryInput, RunnerKind } from "./types.js"; + +const DEFAULT_MODELS: Record<RunnerKind, string> = { + codex: "codex", + "claude-code": "sonnet", + gemini: "gemini-2.5-pro", +}; + +export function createRunnerFromConfig( + input: RunnerFactoryInput, +): AgentRunnerCodexClient { + const { config, cwd, onEvent } = input; + const model = config.model ?? DEFAULT_MODELS[config.kind]; + + switch (config.kind) { + case "claude-code": + return new ClaudeCodeRunner({ + cwd, + model, + onEvent, + }); + + case "gemini": + return new GeminiRunner({ + cwd, + model, + onEvent, + }); + + case "codex": + throw new Error( + "Codex runner uses the native CodexAppServerClient — use createCodexClient instead of createRunnerFromConfig for runner kind 'codex'.", + ); + } +} + +export function isAiSdkRunner(kind: RunnerKind): boolean { + return kind !== "codex"; +} diff --git a/src/runners/gemini-runner.ts b/src/runners/gemini-runner.ts new file mode 100644 index 00000000..237d2708 --- /dev/null +++ b/src/runners/gemini-runner.ts @@ -0,0 +1,134 @@ +import { type LanguageModel, generateText } from "ai"; + +import type { AgentRunnerCodexClient } from "../agent/runner.js"; +import type { + CodexClientEvent, + CodexTurnResult, +} from "../codex/app-server-client.js"; +import { formatEasternTimestamp } from "../logging/format-timestamp.js"; + +export interface GeminiRunnerOptions { + cwd: string; + model: string; + onEvent?: (event: CodexClientEvent) => void; +} + +// Lazy-loaded provider — ai-sdk-provider-gemini-cli is ESM-only, +// require() returns an empty module. Dynamic import() is safe in all contexts. 
+let cachedProvider: ((model: string) => LanguageModel) | null = null; + +async function getGeminiProvider(): Promise<(model: string) => LanguageModel> { + if (cachedProvider) return cachedProvider; + const { createGeminiProvider } = await import("ai-sdk-provider-gemini-cli"); + const provider = createGeminiProvider(); + cachedProvider = provider as (model: string) => LanguageModel; + return cachedProvider; +} + +export class GeminiRunner implements AgentRunnerCodexClient { + private readonly options: GeminiRunnerOptions; + private sessionId: string; + private turnCount = 0; + private closed = false; + + constructor(options: GeminiRunnerOptions) { + this.options = options; + this.sessionId = `gemini-${Date.now()}`; + } + + async startSession(input: { + prompt: string; + title: string; + }): Promise<CodexTurnResult> { + return this.executeTurn(input.prompt, input.title); + } + + async continueTurn(prompt: string, title: string): Promise<CodexTurnResult> { + return this.executeTurn(prompt, title); + } + + async close(): Promise<void> { + this.closed = true; + } + + private async executeTurn( + prompt: string, + _title: string, + ): Promise<CodexTurnResult> { + this.turnCount += 1; + const turnId = `turn-${this.turnCount}`; + const threadId = this.sessionId; + const fullSessionId = `${threadId}-${turnId}`; + + this.emit({ + event: "session_started", + sessionId: fullSessionId, + threadId, + turnId, + }); + + try { + const provider = await getGeminiProvider(); + const result = await generateText({ + model: provider(this.options.model), + prompt, + }); + + const usage = { + inputTokens: result.usage.inputTokens ?? 0, + outputTokens: result.usage.outputTokens ?? 0, + totalTokens: result.usage.totalTokens ?? 
0, + }; + + this.emit({ + event: "turn_completed", + sessionId: fullSessionId, + threadId, + turnId, + usage, + message: result.text, + }); + + return { + status: "completed", + threadId, + turnId, + sessionId: fullSessionId, + usage, + rateLimits: null, + message: result.text, + }; + } catch (error) { + const message = + error instanceof Error ? error.message : "Gemini turn failed"; + + this.emit({ + event: "turn_failed", + sessionId: fullSessionId, + threadId, + turnId, + message, + }); + + return { + status: "failed", + threadId, + turnId, + sessionId: fullSessionId, + usage: null, + rateLimits: null, + message, + }; + } + } + + private emit( + input: Omit<CodexClientEvent, "timestamp" | "codexAppServerPid">, + ): void { + this.options.onEvent?.({ + ...input, + timestamp: formatEasternTimestamp(new Date()), + codexAppServerPid: null, + }); + } +} diff --git a/src/runners/index.ts b/src/runners/index.ts new file mode 100644 index 00000000..08af3eca --- /dev/null +++ b/src/runners/index.ts @@ -0,0 +1,4 @@ +export * from "./types.js"; +export * from "./factory.js"; +export * from "./claude-code-runner.js"; +export * from "./gemini-runner.js"; diff --git a/src/runners/types.ts b/src/runners/types.ts new file mode 100644 index 00000000..c9287a09 --- /dev/null +++ b/src/runners/types.ts @@ -0,0 +1,26 @@ +import type { AgentRunnerCodexClient } from "../agent/runner.js"; +import type { CodexClientEvent } from "../codex/app-server-client.js"; + +export type RunnerKind = "codex" | "claude-code" | "gemini"; + +export const RUNNER_KINDS: readonly RunnerKind[] = [ + "codex", + "claude-code", + "gemini", +] as const; + +export interface RunnerConfig { + kind: RunnerKind; + model: string | null; +} + +export interface RunnerFactoryInput { + config: RunnerConfig; + cwd: string; + onEvent: (event: CodexClientEvent) => void; +} + +export type { AgentRunnerCodexClient as Runner }; +export type RunnerFactory = ( + input: RunnerFactoryInput, +) => AgentRunnerCodexClient; diff --git 
a/src/slack-bot/format.ts b/src/slack-bot/format.ts new file mode 100644 index 00000000..f72e436c --- /dev/null +++ b/src/slack-bot/format.ts @@ -0,0 +1,81 @@ +/** + * Markdown-to-mrkdwn converter for non-streamed Slack content. + * + * Streamed content uses `markdown_text` which accepts standard markdown natively. + * This converter is only used for non-streamed content such as error messages, + * slash command responses, and unmapped channel warnings posted via `say()`. + * + * Uses a protected-region pattern: fenced code blocks and inline code are + * extracted as placeholders before conversion, then restored afterward. + */ + +/** Placeholder prefix used to protect code regions during conversion. */ +const PLACEHOLDER_PREFIX = "\x00CODE_REGION_"; + +/** Placeholder prefix for bold regions to prevent italic conversion. */ +const BOLD_PREFIX = "\x00BOLD_REGION_"; + +/** + * Convert standard Markdown to Slack mrkdwn format. + * + * Protected regions (fenced code blocks and inline code) are preserved as-is. + * Converts: links, headers, bold, italic, and strikethrough. 
+ */ +export function markdownToMrkdwn(markdown: string): string { + const regions: string[] = []; + const boldRegions: string[] = []; + + // Step 1: Extract protected regions (fenced code blocks first, then inline code) + let text = markdown; + + // Fenced code blocks: ```...``` + text = text.replace(/```[\s\S]*?```/g, (match) => { + const index = regions.length; + regions.push(match); + return `${PLACEHOLDER_PREFIX}${index}\x00`; + }); + + // Inline code: `...` + text = text.replace(/`[^`]+`/g, (match) => { + const index = regions.length; + regions.push(match); + return `${PLACEHOLDER_PREFIX}${index}\x00`; + }); + + // Step 2: Convert markdown syntax to mrkdwn + + // Links: [text](url) → <url|text> + text = text.replace(/\[([^\]]+)\]\(([^)]+)\)/g, "<$2|$1>"); + + // Headers: ## Header → *Header* (protect from italic conversion) + text = text.replace(/^#{1,6}\s+(.+)$/gm, (_match, content: string) => { + const index = boldRegions.length; + boldRegions.push(`*${content}*`); + return `${BOLD_PREFIX}${index}\x00`; + }); + + // Bold: **text** → *text* (protect from italic conversion) + text = text.replace(/\*\*(.+?)\*\*/g, (_match, content: string) => { + const index = boldRegions.length; + boldRegions.push(`*${content}*`); + return `${BOLD_PREFIX}${index}\x00`; + }); + + // Italic: *text* → _text_ + text = text.replace(/(?<!\*)\*(?!\*)(.+?)(?<!\*)\*(?!\*)/g, "_$1_"); + + // Strikethrough: ~~text~~ → ~text~ + text = text.replace(/~~(.+?)~~/g, "~$1~"); + + // Step 3: Restore bold regions + for (let i = boldRegions.length - 1; i >= 0; i--) { + text = text.replace(`${BOLD_PREFIX}${i}\x00`, boldRegions[i] ?? ""); + } + + // Step 4: Restore code regions + for (let i = regions.length - 1; i >= 0; i--) { + text = text.replace(`${PLACEHOLDER_PREFIX}${i}\x00`, regions[i] ?? 
""); + } + + return text; +} diff --git a/src/slack-bot/handler.ts b/src/slack-bot/handler.ts new file mode 100644 index 00000000..854dfd7b --- /dev/null +++ b/src/slack-bot/handler.ts @@ -0,0 +1,233 @@ +/** + * Core message handler for the Slack bot. + * + * Receives messages via Bolt's app.message() listener, manages reaction indicators, + * invokes Claude Code via the AI SDK streamText, and progressively streams replies + * using Slack's ChatStreamer API. + * Supports session continuity (thread replies resume CC sessions) and + * runtime channel-to-project mapping via /project set slash commands. + */ +import type { AllMiddlewareArgs, SlackEventMiddlewareArgs } from "@slack/bolt"; +import type { WebClient } from "@slack/web-api"; +import { streamText } from "ai"; +import { claudeCode } from "ai-sdk-provider-claude-code"; + +import { + markError, + markProcessing, + markSuccess, + markWarning, +} from "../reactions.js"; +import { resolveClaudeModelId } from "../runners/claude-code-runner.js"; +import { markdownToMrkdwn } from "./format.js"; +import type { CcSessionStore } from "./session-store.js"; +import { getCcSessionId, setCcSessionId } from "./session-store.js"; +import { parseSlashCommand } from "./slash-commands.js"; +import { StreamConsumer } from "./stream-consumer.js"; +import type { ChannelProjectMap, SessionMap } from "./types.js"; + +export interface HandleMessageOptions { + /** Channel ID → project directory mapping */ + channelMap: ChannelProjectMap; + /** In-memory session store */ + sessions: SessionMap; + /** In-memory CC session store (thread ID → CC session ID) */ + ccSessions: CcSessionStore; + /** Claude Code model identifier (default: "sonnet") */ + model?: string; +} + +/** Bolt message handler arguments. */ +export type BoltMessageArgs = SlackEventMiddlewareArgs<"message"> & + AllMiddlewareArgs; + +/** + * Split a response into paragraph-sized chunks at `\n\n` boundaries. 
+ * Returns the original text as a single-element array if no paragraph breaks exist. + * + * @deprecated Use `chunkResponse()` from `../chunking.js` instead, which also + * enforces the 39,000 character Slack message limit. + */ +export function splitAtParagraphs(text: string): string[] { + const chunks = text.split(/\n\n+/).filter((chunk) => chunk.trim().length > 0); + return chunks.length > 0 ? chunks : [text]; +} + +/** Truncate a string to a maximum length, adding ellipsis if truncated. */ +function truncateDetail(detail: string, maxLength = 500): string { + if (detail.length <= maxLength) { + return detail; + } + return `${detail.slice(0, maxLength)}…`; +} + +/** + * Set the assistant thread status (best-effort, silent no-op if scope unavailable). + */ +async function setThinkingStatus( + client: WebClient, + channel: string, + threadTs: string, +): Promise<void> { + try { + await client.assistant.threads.setStatus({ + channel_id: channel, + thread_ts: threadTs, + status: "is thinking...", + }); + } catch { + // Silent no-op — scope may not be available + } +} + +/** + * Creates a message handler function for use with `app.message()`. + */ +export function createMessageHandler(options: HandleMessageOptions) { + const { channelMap, sessions, ccSessions, model = "sonnet" } = options; + + return async (args: BoltMessageArgs): Promise<void> => { + const { message, say, client, context } = args; + + // Filter bot's own messages and message updates/deletions + const subtype = "subtype" in message ? message.subtype : undefined; + if ( + "bot_id" in message || + subtype === "bot_message" || + subtype === "message_changed" || + subtype === "message_deleted" + ) { + return; + } + + // Extract message text — only present on GenericMessageEvent (no subtype) + const text = "text" in message ? (message.text ?? "") : ""; + + // Derive thread and message identifiers + const threadTs = + "thread_ts" in message ? (message.thread_ts ?? 
message.ts) : message.ts; + const messageTs = message.ts; + const channel = message.channel; + + // Extract user and team IDs for streaming + const userId = "user" in message ? (message.user as string) : ""; + const teamId = context.teamId; + + // Check for slash commands before anything else + const command = parseSlashCommand(text); + if (command) { + if (command.type === "project-set") { + channelMap.set(channel, command.path); + await say({ + text: markdownToMrkdwn( + `Project directory for this channel set to \`${command.path}\`.`, + ), + thread_ts: threadTs, + }); + } + return; + } + + // Add eyes reaction to indicate processing + await markProcessing(client, channel, messageTs); + + try { + // Resolve channel → project directory + const projectDir = channelMap.get(channel); + if (!projectDir) { + await say({ + text: markdownToMrkdwn( + `No project directory mapped for channel \`${channel}\`. Please configure a channel-to-project mapping.`, + ), + thread_ts: threadTs, + }); + await markWarning(client, channel, messageTs); + return; + } + + // Track session + sessions.set(threadTs, { + channelId: channel, + projectDir, + lastActiveAt: new Date(), + }); + + // Build CC provider options with session continuity + const resolvedModel = resolveClaudeModelId(model); + const existingSessionId = getCcSessionId(ccSessions, threadTs); + const ccOptions: { + cwd: string; + permissionMode: "bypassPermissions"; + resume?: string; + } = { + cwd: projectDir, + permissionMode: "bypassPermissions", + }; + if (existingSessionId) { + ccOptions.resume = existingSessionId; + } + + // Set "is thinking..." 
status (best-effort) + await setThinkingStatus( + client as unknown as WebClient, + channel, + threadTs, + ); + + // Invoke Claude Code via AI SDK streamText + const result = streamText({ + model: claudeCode(resolvedModel, ccOptions), + prompt: text, + }); + + // Progressively stream response via Slack ChatStreamer + const consumer = new StreamConsumer( + client as unknown as WebClient, + channel, + threadTs, + userId, + teamId, + ); + try { + for await (const chunk of result.textStream) { + await consumer.append(chunk); + } + await consumer.finish(); + } catch (error) { + await consumer.finish(); // ensure cleanup + throw error; + } + + // Extract and store session ID from provider metadata for continuity + const response = await result.response; + const lastMsg = response.messages?.[response.messages.length - 1] as + | { + providerMetadata?: { + "claude-code"?: { sessionId?: string }; + }; + } + | undefined; + const ccSessionId = lastMsg?.providerMetadata?.["claude-code"]?.sessionId; + if (ccSessionId) { + setCcSessionId(ccSessions, threadTs, ccSessionId); + } + + // Replace eyes with checkmark on success + await markSuccess(client, channel, messageTs); + } catch (error) { + // Replace eyes with error indicator on failure + await markError(client, channel, messageTs); + + const errorType = + error instanceof Error ? error.constructor.name : "Error"; + const errorDetail = + error instanceof Error ? error.message : "An unexpected error occurred"; + await say({ + text: markdownToMrkdwn( + `Error: ${errorType}\n${truncateDetail(errorDetail)}`, + ), + thread_ts: threadTs, + }); + } + }; +} diff --git a/src/slack-bot/index.ts b/src/slack-bot/index.ts new file mode 100644 index 00000000..d18b6d26 --- /dev/null +++ b/src/slack-bot/index.ts @@ -0,0 +1,115 @@ +/** + * Slack bot entry point. + * + * Configures a Bolt App with Socket Mode, + * registers message handlers, and exports the app. 
+ */ +import { App } from "@slack/bolt"; + +import { createMessageHandler } from "./handler.js"; +import { createCcSessionStore } from "./session-store.js"; +import type { ChannelProjectMap, SessionMap, SlackBotConfig } from "./types.js"; + +export type { SlackBotConfig, ChannelProjectMap, SessionMap } from "./types.js"; +export type { CcSessionStore } from "./session-store.js"; +export { + createCcSessionStore, + getCcSessionId, + setCcSessionId, +} from "./session-store.js"; +export { parseSlashCommand } from "./slash-commands.js"; +export { createMessageHandler, splitAtParagraphs } from "./handler.js"; +export { markdownToMrkdwn } from "./format.js"; +export { StreamConsumer } from "./stream-consumer.js"; +export { chunkResponse, SLACK_MAX_CHARS } from "../chunking.js"; +export { + markProcessing, + markSuccess, + markError, + markWarning, +} from "../reactions.js"; +export { collectStream } from "../streaming.js"; + +/** + * Parse a JSON string of channel→project mappings into a ChannelProjectMap. + * Expected format: `{ "C123": "/path/to/project", "C456": "/other/project" }` + */ +export function parseChannelProjectMap(json: string): ChannelProjectMap { + const parsed: unknown = JSON.parse(json); + if (typeof parsed !== "object" || parsed === null || Array.isArray(parsed)) { + throw new Error("CHANNEL_PROJECT_MAP must be a JSON object"); + } + const map: ChannelProjectMap = new Map(); + for (const [key, value] of Object.entries( + parsed as Record<string, unknown>, + )) { + if (typeof value !== "string") { + throw new Error( + `CHANNEL_PROJECT_MAP values must be strings, got ${typeof value} for key "${key}"`, + ); + } + map.set(key, value); + } + return map; +} + +/** In-memory session store shared across handlers. */ +const sessions: SessionMap = new Map(); + +/** In-memory CC session store for session continuity. */ +const ccSessions = createCcSessionStore(); + +/** + * Create and configure a Bolt App for the Slack bot using Socket Mode. 
+ * + * Returns the App instance and associated session stores. + */ +export function createSlackBoltApp(config: SlackBotConfig) { + const { botToken, appToken, channelMap, model } = config; + + const app = new App({ + token: botToken, + appToken, + socketMode: true, + }); + + const handler = createMessageHandler({ + channelMap, + sessions, + ccSessions, + ...(model !== undefined ? { model } : {}), + }); + + // Match ALL messages — no @mention required per spec + app.message(handler); + + return { + app, + /** The in-memory session store (exposed for testing / monitoring). */ + sessions, + /** The in-memory CC session store (exposed for testing / monitoring). */ + ccSessions, + }; +} + +/** + * Start the Slack bot using Socket Mode. + * + * Creates the Bolt app, registers handlers, and connects via WebSocket. + */ +export async function startSlackBot(config: SlackBotConfig): Promise<{ + app: App; + sessions: SessionMap; + ccSessions: ReturnType<typeof createCcSessionStore>; +}> { + const result = createSlackBoltApp(config); + + await result.app.start(); + + const channelCount = config.channelMap.size; + console.log( + `Slack bot connected via Socket Mode (${channelCount} channel mapping${channelCount === 1 ? "" : "s"})`, + ); + + return result; +} diff --git a/src/slack-bot/server.ts b/src/slack-bot/server.ts new file mode 100644 index 00000000..96979de6 --- /dev/null +++ b/src/slack-bot/server.ts @@ -0,0 +1,71 @@ +/** + * Standalone entry point for the Slack bot using Socket Mode. + * + * Reads configuration from environment variables, creates a Bolt app, + * registers handlers, and starts Socket Mode connection. + */ +import { parseChannelProjectMap, startSlackBot } from "./index.js"; +import type { SlackBotConfig } from "./types.js"; + +/** + * Load and validate Slack bot configuration from environment variables. 
+ * + * Required env vars: SLACK_BOT_TOKEN, SLACK_APP_TOKEN + * Optional: CHANNEL_PROJECT_MAP (JSON, default {}), CLAUDE_MODEL + * + * @throws {Error} If required environment variables are missing. + */ +export function loadSlackBotConfig( + env: Record<string, string | undefined> = process.env, +): SlackBotConfig { + const missing: string[] = []; + + const botToken = env.SLACK_BOT_TOKEN; + if (!botToken) { + missing.push("SLACK_BOT_TOKEN"); + } + + const appToken = env.SLACK_APP_TOKEN; + if (!appToken) { + missing.push("SLACK_APP_TOKEN"); + } + + if (missing.length > 0) { + throw new Error( + `Missing required environment variables: ${missing.join(", ")}`, + ); + } + + // At this point both botToken and appToken are defined (missing.length === 0). + const resolvedBotToken = botToken as string; + const resolvedAppToken = appToken as string; + + const channelMapJson = env.CHANNEL_PROJECT_MAP ?? "{}"; + const channelMap = parseChannelProjectMap(channelMapJson); + + const model = env.CLAUDE_MODEL; + + return { + botToken: resolvedBotToken, + appToken: resolvedAppToken, + channelMap, + ...(model !== undefined ? { model } : {}), + }; +} + +/* Entry point for direct execution: node dist/src/slack-bot/server.js */ +const isDirectExecution = + import.meta.url === `file://${process.argv[1]}` || + process.argv[1]?.endsWith("/src/slack-bot/server.js"); + +if (isDirectExecution) { + try { + const config = loadSlackBotConfig(); + void startSlackBot(config); + } catch (error) { + console.error( + error instanceof Error ? error.message : "Failed to start Slack bot", + ); + process.exit(1); + } +} diff --git a/src/slack-bot/session-store.ts b/src/slack-bot/session-store.ts new file mode 100644 index 00000000..6c034a14 --- /dev/null +++ b/src/slack-bot/session-store.ts @@ -0,0 +1,38 @@ +/** + * In-memory Claude Code session store for session continuity. + * + * Maps thread IDs to CC session IDs so that thread replies can resume + * the existing Claude Code session. 
v1 uses an in-memory Map — + * Redis is a future enhancement. + */ + +/** Maps thread ID → Claude Code session ID. */ +export type CcSessionStore = Map<string, string>; + +/** Create a new in-memory CC session store. */ +export function createCcSessionStore(): CcSessionStore { + return new Map(); +} + +/** + * Look up the CC session ID for a given thread. + * Returns `undefined` if no session exists (i.e., new conversation). + */ +export function getCcSessionId( + store: CcSessionStore, + threadId: string, +): string | undefined { + return store.get(threadId); +} + +/** + * Store the CC session ID for a given thread. + * Overwrites any previously stored session ID for the same thread. + */ +export function setCcSessionId( + store: CcSessionStore, + threadId: string, + sessionId: string, +): void { + store.set(threadId, sessionId); +} diff --git a/src/slack-bot/slash-commands.ts b/src/slack-bot/slash-commands.ts new file mode 100644 index 00000000..f6b43b75 --- /dev/null +++ b/src/slack-bot/slash-commands.ts @@ -0,0 +1,31 @@ +/** + * Slash command parsing for the Slack bot. + * + * Parses `/project set <path>` from message text and returns + * structured command objects. Unknown commands return `null`. + */ + +/** A parsed `/project set` command. */ +export interface ProjectSetCommand { + type: "project-set"; + path: string; +} + +export type SlashCommand = ProjectSetCommand; + +/** + * Parse a slash command from message text. + * + * Currently supports: + * - `/project set <path>` — set the channel-to-project mapping + * + * Returns `null` if the text is not a recognized slash command. 
+ */ +export function parseSlashCommand(text: string): SlashCommand | null { + const trimmed = text.trim(); + const match = trimmed.match(/^\/project\s+set\s+(.+)$/); + if (match?.[1]) { + return { type: "project-set", path: match[1].trim() }; + } + return null; +} diff --git a/src/slack-bot/stream-consumer.ts b/src/slack-bot/stream-consumer.ts new file mode 100644 index 00000000..20c813c7 --- /dev/null +++ b/src/slack-bot/stream-consumer.ts @@ -0,0 +1,108 @@ +/** + * Thin wrapper around Slack's `client.chatStream()` for progressive streaming. + * + * Adds lazy initialization (stream created on first `append()` call), + * overflow handling (starts a new stream at 39K boundary), and error cleanup. + * + * The `ChatStreamer` class in `@slack/web-api` already handles buffering + * (default 256 bytes) and the start/append/stop lifecycle. This wrapper + * only adds lazy init, overflow, and error cleanup. + */ +import type { WebClient } from "@slack/web-api"; +import type { ChatStreamer } from "@slack/web-api/dist/chat-stream.js"; + +import { SLACK_MAX_CHARS } from "../chunking.js"; + +/** + * Maximum characters before starting a new stream. + * Uses the same 39K boundary as chunk-based posting. + */ +const STREAM_OVERFLOW_CHARS = SLACK_MAX_CHARS; + +export class StreamConsumer { + private client: WebClient; + private channel: string; + private threadTs: string; + private recipientUserId: string; + private recipientTeamId: string | undefined; + + private streamer: ChatStreamer | null = null; + private charCount = 0; + + constructor( + client: WebClient, + channel: string, + threadTs: string, + recipientUserId: string, + recipientTeamId: string | undefined, + ) { + this.client = client; + this.channel = channel; + this.threadTs = threadTs; + this.recipientUserId = recipientUserId; + this.recipientTeamId = recipientTeamId; + } + + /** + * Append text to the current stream. Creates the stream lazily on first call. 
+ * If accumulated text exceeds the overflow boundary, stops the current stream + * and starts a fresh one. + */ + async append(text: string): Promise<void> { + // Check if appending would overflow the current stream + if ( + this.streamer !== null && + this.charCount + text.length > STREAM_OVERFLOW_CHARS + ) { + await this.stopCurrentStream(); + } + + // Lazy init: create stream on first append (or after overflow reset) + if (this.streamer === null) { + this.streamer = this.createStreamer(); + this.charCount = 0; + } + + await this.streamer.append({ markdown_text: text }); + this.charCount += text.length; + } + + /** + * Finalize the stream. Must be called when done (typically in a finally block). + * Safe to call even if no stream was started (no-op). + */ + async finish(): Promise<void> { + await this.stopCurrentStream(); + } + + private createStreamer(): ChatStreamer { + const args: { + channel: string; + thread_ts: string; + recipient_user_id?: string; + recipient_team_id?: string; + } = { + channel: this.channel, + thread_ts: this.threadTs, + recipient_user_id: this.recipientUserId, + }; + + if (this.recipientTeamId !== undefined) { + args.recipient_team_id = this.recipientTeamId; + } + + return this.client.chatStream(args); + } + + private async stopCurrentStream(): Promise<void> { + if (this.streamer !== null) { + try { + await this.streamer.stop(); + } catch { + // Best-effort cleanup — stream may already be stopped or failed + } + this.streamer = null; + this.charCount = 0; + } + } +} diff --git a/src/slack-bot/types.ts b/src/slack-bot/types.ts new file mode 100644 index 00000000..7e70e81b --- /dev/null +++ b/src/slack-bot/types.ts @@ -0,0 +1,37 @@ +/** + * Type definitions for the Slack bot module. + * + * Channel-to-project-directory mappings and session state are stored + * in-memory (Map) for v1 — Redis is a future enhancement. + */ + +/** Maps Slack channel IDs to local project directories for Claude Code cwd. 
*/ +export type ChannelProjectMap = Map<string, string>; + +/** Configuration for the Slack bot. */ +export interface SlackBotConfig { + /** Slack bot token (xoxb-...) */ + botToken: string; + /** Slack app-level token (xapp-...) for Socket Mode */ + appToken: string; + /** Channel ID → project directory mapping */ + channelMap: ChannelProjectMap; + /** + * Claude Code model identifier (e.g. "sonnet", "opus", "haiku"). + * Defaults to "sonnet". + */ + model?: string; +} + +/** Per-thread session state stored in memory. */ +export interface SessionState { + /** The Slack channel ID where the conversation started */ + channelId: string; + /** The project directory mapped to the channel */ + projectDir: string; + /** Timestamp of the last interaction */ + lastActiveAt: Date; +} + +/** In-memory session map keyed by thread ID. */ +export type SessionMap = Map<string, SessionState>; diff --git a/src/streaming.ts b/src/streaming.ts new file mode 100644 index 00000000..86ad3e2d --- /dev/null +++ b/src/streaming.ts @@ -0,0 +1,22 @@ +/** + * Streaming utilities for collecting AI SDK stream responses. + * + * Provides helpers to consume an async text stream from the Vercel AI SDK + * `streamText()` result and collect the full response text. + */ + +/** + * Collect all chunks from an async text stream into a single string. + * + * @param textStream - The async iterable text stream from `streamText().textStream`. + * @returns The concatenated full response text. + */ +export async function collectStream( + textStream: AsyncIterable<string>, +): Promise<string> { + let fullText = ""; + for await (const chunk of textStream) { + fullText += chunk; + } + return fullText; +} diff --git a/src/test-alpha.ts b/src/test-alpha.ts new file mode 100644 index 00000000..7ac8e4d0 --- /dev/null +++ b/src/test-alpha.ts @@ -0,0 +1,14 @@ +/** + * test-alpha — Test foundation module. + * + * This is the first module in the test chain. 
Subsequent issues + * will extend it with additional capabilities. + */ + +/** Sentinel that confirms the module loaded successfully. */ +export const TEST_ALPHA_READY = true as const; + +/** Returns a greeting string for validation purposes. */ +export function greet(name: string): string { + return `Hello, ${name}!`; +} diff --git a/src/tracker/linear-client.ts b/src/tracker/linear-client.ts index 835d202d..564a2b41 100644 --- a/src/tracker/linear-client.ts +++ b/src/tracker/linear-client.ts @@ -11,8 +11,14 @@ import { } from "./linear-normalize.js"; import { LINEAR_CANDIDATE_ISSUES_QUERY, + LINEAR_CREATE_COMMENT_MUTATION, + LINEAR_ISSUES_BY_LABELS_QUERY, LINEAR_ISSUES_BY_STATES_QUERY, + LINEAR_ISSUE_PARENT_AND_SIBLINGS_QUERY, LINEAR_ISSUE_STATES_BY_IDS_QUERY, + LINEAR_ISSUE_UPDATE_MUTATION, + LINEAR_OPEN_ISSUES_BY_LABELS_QUERY, + LINEAR_WORKFLOW_STATES_QUERY, } from "./linear-queries.js"; import type { IssueStateSnapshot, IssueTracker } from "./tracker.js"; @@ -48,6 +54,45 @@ interface LinearIssueStatesData { }; } +interface LinearIssueUpdateData { + issueUpdate?: { + success?: boolean; + issue?: { id?: string; state?: { name?: string } }; + }; +} + +interface LinearCommentCreateData { + commentCreate?: { + success?: boolean; + comment?: { id?: string }; + }; +} + +interface LinearIssueParentAndSiblingsData { + issue?: { + id?: string; + identifier?: string; + parent?: { + id?: string; + identifier?: string; + state?: { name?: string }; + children?: { + nodes?: Array<{ + id?: string; + identifier?: string; + state?: { name?: string }; + }>; + }; + } | null; + }; +} + +interface LinearWorkflowStatesData { + workflowStates?: { + nodes?: Array<{ id?: string; name?: string }>; + }; +} + export interface LinearTrackerClientOptions { endpoint: string; apiKey: string | null; @@ -100,6 +145,52 @@ export class LinearTrackerClient implements IssueTracker { }); } + async fetchIssuesByLabels(labelNames: string[]): Promise<Issue[]> { + if (labelNames.length === 0) { + return 
[]; + } + + return this.fetchIssuePages(LINEAR_ISSUES_BY_LABELS_QUERY, { + projectSlug: this.requireProjectSlug(), + labelNames, + first: this.pageSize, + relationFirst: this.pageSize, + }); + } + + async fetchOpenIssuesByLabels( + labelNames: string[], + excludeStateNames: string[], + ): Promise<Issue[]> { + if (labelNames.length === 0) { + return []; + } + + // Single GraphQL call — we only need to know if any non-terminal halt issue + // exists, so fetch at most 1 result. No pagination needed. + const response = await this.postGraphql<LinearCandidateData>( + LINEAR_OPEN_ISSUES_BY_LABELS_QUERY, + { + projectSlug: this.requireProjectSlug(), + labelNames, + excludeStateNames, + first: 1, + relationFirst: this.pageSize, + }, + ); + + const nodes = response.issues?.nodes; + if (!Array.isArray(nodes)) { + throw new TrackerError( + ERROR_CODES.linearUnknownPayload, + "Linear open issues by labels payload was missing issues.nodes.", + { details: response }, + ); + } + + return nodes.map((node) => normalizeLinearIssue(node)); + } + async fetchIssueStatesByIds( issueIds: string[], ): Promise<IssueStateSnapshot[]> { @@ -126,6 +217,109 @@ export class LinearTrackerClient implements IssueTracker { return nodes.map((node) => normalizeLinearIssueState(node)); } + async postComment(issueId: string, body: string): Promise<void> { + const response = await this.postGraphql<LinearCommentCreateData>( + LINEAR_CREATE_COMMENT_MUTATION, + { issueId, body }, + ); + + if (response.commentCreate?.success !== true) { + throw new TrackerError( + ERROR_CODES.linearGraphqlErrors, + "Linear commentCreate mutation did not return success.", + { details: response }, + ); + } + } + + async updateIssueState( + issueId: string, + stateName: string, + teamKey: string, + ): Promise<void> { + const statesResponse = await this.postGraphql<LinearWorkflowStatesData>( + LINEAR_WORKFLOW_STATES_QUERY, + { teamId: teamKey }, + ); + + const states = statesResponse.workflowStates?.nodes; + if 
(!Array.isArray(states)) { + throw new TrackerError( + ERROR_CODES.linearUnknownPayload, + "Linear workflowStates payload was missing nodes.", + { details: statesResponse }, + ); + } + + const targetState = states.find( + (s) => + typeof s.name === "string" && + s.name.toLowerCase() === stateName.toLowerCase(), + ); + if (!targetState || typeof targetState.id !== "string") { + throw new TrackerError( + ERROR_CODES.linearUnknownPayload, + `Linear workflow state "${stateName}" not found for team "${teamKey}".`, + { details: { states, targetStateName: stateName } }, + ); + } + + const updateResponse = await this.postGraphql<LinearIssueUpdateData>( + LINEAR_ISSUE_UPDATE_MUTATION, + { issueId, stateId: targetState.id }, + ); + + if (updateResponse.issueUpdate?.success !== true) { + throw new TrackerError( + ERROR_CODES.linearGraphqlErrors, + "Linear issueUpdate mutation did not return success.", + { details: updateResponse }, + ); + } + } + + async checkAndCloseParent( + issueId: string, + terminalStates: string[], + teamKey: string, + ): Promise<void> { + const terminalSet = new Set(terminalStates.map((s) => s.toLowerCase())); + + const response = await this.postGraphql<LinearIssueParentAndSiblingsData>( + LINEAR_ISSUE_PARENT_AND_SIBLINGS_QUERY, + { issueId }, + ); + + const parent = response.issue?.parent; + if (!parent || !parent.id || !parent.identifier) { + // No parent — nothing to do + return; + } + + const siblings = parent.children?.nodes; + if (!Array.isArray(siblings) || siblings.length === 0) { + return; + } + + const allTerminal = siblings.every((sibling) => { + const stateName = sibling.state?.name; + return ( + typeof stateName === "string" && + terminalSet.has(stateName.toLowerCase()) + ); + }); + + if (!allTerminal) { + return; + } + + console.log( + `[orchestrator] Auto-closing parent ${parent.identifier} — all sub-issues complete`, + ); + + await this.updateIssueState(parent.id, "Done", teamKey); + } + async executeRawGraphql( query: string, 
variables: Record<string, unknown> = {}, diff --git a/src/tracker/linear-queries.ts b/src/tracker/linear-queries.ts index 6068ee40..84d6dffd 100644 --- a/src/tracker/linear-queries.ts +++ b/src/tracker/linear-queries.ts @@ -99,3 +99,120 @@ export const LINEAR_ISSUE_STATES_BY_IDS_QUERY = ` } } `.trim(); + +export const LINEAR_WORKFLOW_STATES_QUERY = ` + query SymphonyWorkflowStates($teamId: String!) { + workflowStates(filter: { team: { key: { eq: $teamId } } }) { + nodes { + id + name + } + } + } +`.trim(); + +export const LINEAR_ISSUE_UPDATE_MUTATION = ` + mutation SymphonyIssueUpdate($issueId: String!, $stateId: String!) { + issueUpdate(id: $issueId, input: { stateId: $stateId }) { + success + issue { + id + state { + name + } + } + } + } +`.trim(); + +export const LINEAR_CREATE_COMMENT_MUTATION = ` + mutation SymphonyCreateComment($issueId: String!, $body: String!) { + commentCreate(input: { issueId: $issueId, body: $body }) { + success + comment { + id + } + } + } +`.trim(); + +export const LINEAR_ISSUES_BY_LABELS_QUERY = ` + query SymphonyIssuesByLabels( + $projectSlug: String! + $labelNames: [String!]! + $first: Int! + $relationFirst: Int! + $after: String + ) { + issues( + first: $first + after: $after + filter: { + project: { slugId: { eq: $projectSlug } } + labels: { name: { in: $labelNames } } + } + orderBy: createdAt + ) { + nodes { + ${ISSUE_FIELDS} + } + pageInfo { + hasNextPage + endCursor + } + } + } +`.trim(); + +export const LINEAR_ISSUE_PARENT_AND_SIBLINGS_QUERY = ` + query SymphonyIssueParentAndSiblings($issueId: String!) { + issue(id: $issueId) { + id + identifier + parent { + id + identifier + state { + name + } + children { + nodes { + id + identifier + state { + name + } + } + } + } + } + } +`.trim(); + +export const LINEAR_OPEN_ISSUES_BY_LABELS_QUERY = ` + query SymphonyOpenIssuesByLabels( + $projectSlug: String! + $labelNames: [String!]! + $excludeStateNames: [String!]! + $first: Int! + $relationFirst: Int! 
+ ) { + issues( + first: $first + filter: { + project: { slugId: { eq: $projectSlug } } + labels: { name: { in: $labelNames } } + state: { name: { nin: $excludeStateNames } } + } + orderBy: createdAt + ) { + nodes { + ${ISSUE_FIELDS} + } + pageInfo { + hasNextPage + endCursor + } + } + } +`.trim(); diff --git a/src/tracker/tracker.ts b/src/tracker/tracker.ts index 79893864..1ec0fea6 100644 --- a/src/tracker/tracker.ts +++ b/src/tracker/tracker.ts @@ -10,4 +10,9 @@ export interface IssueTracker { fetchCandidateIssues(): Promise<Issue[]>; fetchIssuesByStates(stateNames: string[]): Promise<Issue[]>; fetchIssueStatesByIds(issueIds: string[]): Promise<IssueStateSnapshot[]>; + fetchIssuesByLabels?(labelNames: string[]): Promise<Issue[]>; + fetchOpenIssuesByLabels?( + labelNames: string[], + excludeStateNames: string[], + ): Promise<Issue[]>; } diff --git a/src/version.ts b/src/version.ts new file mode 100644 index 00000000..4f366cc3 --- /dev/null +++ b/src/version.ts @@ -0,0 +1,72 @@ +import { execSync } from "node:child_process"; +import { existsSync } from "node:fs"; +import { createRequire } from "node:module"; +import { dirname, resolve } from "node:path"; +import { fileURLToPath } from "node:url"; + +const require = createRequire(import.meta.url); + +/** + * Resolve the path to the project-root package.json. + * Works from both src/version.ts and dist/src/version.js. + */ +function findPackageJson(): string { + let dir = dirname(fileURLToPath(import.meta.url)); + for (let i = 0; i < 5; i++) { + const candidate = resolve(dir, "package.json"); + if (existsSync(candidate)) { + return candidate; + } + dir = dirname(dir); + } + // Fallback — let createRequire throw a clear error if missing. + return resolve(dirname(fileURLToPath(import.meta.url)), "../package.json"); +} + +/** + * The calver version string read from package.json at runtime. 
+ */ +export const VERSION: string = ( + require(findPackageJson()) as { version: string } +).version; + +let cachedGitSha: string | undefined; +let gitShaResolved = false; + +function resolveGitSha(): string | undefined { + if (gitShaResolved) { + return cachedGitSha; + } + gitShaResolved = true; + try { + const sha = execSync("git rev-parse --short=7 HEAD", { + encoding: "utf-8", + stdio: ["ignore", "pipe", "ignore"], + timeout: 5000, + }).trim(); + if (/^[0-9a-f]{7}$/.test(sha)) { + cachedGitSha = sha; + } + } catch { + // git not available or not a git repo — leave undefined + } + return cachedGitSha; +} + +/** + * Returns a display version string including the git SHA suffix when available. + * Format: "VERSION+SHA" (e.g. "0.1.8+abc1234") or just "VERSION" if git is unavailable. + */ +export function getDisplayVersion(): string { + const sha = resolveGitSha(); + return sha ? `${VERSION}+${sha}` : VERSION; +} + +/** + * Reset cached git SHA — only for testing purposes. + * @internal + */ +export function _resetGitShaCache(): void { + cachedGitSha = undefined; + gitShaResolved = false; +} diff --git a/src/workspace/workspace-manager.ts b/src/workspace/workspace-manager.ts index 8c303d68..bad8599c 100644 --- a/src/workspace/workspace-manager.ts +++ b/src/workspace/workspace-manager.ts @@ -24,6 +24,68 @@ export interface WorkspaceManagerOptions { hooks?: WorkspaceHookRunner | null; } +/** + * A simple async mutual-exclusion lock. + * + * Callers acquire the lock with `acquire()`, which returns a `release` + * function. The next waiter is unblocked only after `release()` is called. + * `depth` reflects the total number of callers currently holding or queued + * for the lock, which can be inspected *before* calling `acquire()` to + * determine whether the caller will have to wait. + */ +export class AsyncMutex { + #queue: Promise<void> = Promise.resolve(); + #depth = 0; + + /** Total number of callers holding or waiting for the lock. 
*/ + get depth(): number { + return this.#depth; + } + + /** + * Acquire the lock. Resolves with a `release` function that must be called + * to hand the lock to the next waiter. + */ + acquire(): Promise<() => void> { + this.#depth++; + + let unlock!: () => void; + const prev = this.#queue; + this.#queue = this.#queue.then( + () => + new Promise<void>((resolve) => { + unlock = resolve; + }), + ); + + return prev.then(() => { + const release = () => { + this.#depth--; + unlock(); + }; + return release; + }); + } +} + +/** + * Module-level registry of per-root creation mutexes. + * + * Keyed by `workspaceRoot` (the normalised bare-clone path) so that + * concurrent creations for the same repo are serialised while creations + * for different repos can proceed independently. + */ +const creationMutexes = new Map<string, AsyncMutex>(); + +function getCreationMutex(workspaceRoot: string): AsyncMutex { + let mutex = creationMutexes.get(workspaceRoot); + if (!mutex) { + mutex = new AsyncMutex(); + creationMutexes.set(workspaceRoot, mutex); + } + return mutex; +} + export class WorkspaceManager { readonly root: string; readonly #fs: FileSystemLike; @@ -53,10 +115,28 @@ export class WorkspaceManager { }; if (createdNow) { - await this.#hooks?.run({ - name: "afterCreate", - workspacePath, - }); + const mutex = getCreationMutex(workspaceRoot); + const queueDepth = mutex.depth; + + if (queueDepth > 0) { + console.log( + `[workspace] afterCreate for ${workspacePath} is queued (depth: ${queueDepth})`, + ); + } else { + console.log( + `[workspace] afterCreate for ${workspacePath} is executing`, + ); + } + + const release = await mutex.acquire(); + try { + await this.#hooks?.run({ + name: "afterCreate", + workspacePath, + }); + } finally { + release(); + } } return workspace; diff --git a/tests/agent/prompt-builder.test.ts b/tests/agent/prompt-builder.test.ts index f4cbf464..241f5604 100644 --- a/tests/agent/prompt-builder.test.ts +++ b/tests/agent/prompt-builder.test.ts @@ -78,6 
+78,31 @@ describe("prompt builder", () => { expect(prompt).toBe("first-run"); }); + it("makes stageName available in the template context", async () => { + const prompt = await renderPrompt({ + workflow: { + promptTemplate: + '{% if stageName == "investigate" %}research{% else %}build{% endif %}', + }, + issue: ISSUE_FIXTURE, + attempt: null, + stageName: "investigate", + }); + + expect(prompt).toBe("research"); + + const promptNull = await renderPrompt({ + workflow: { + promptTemplate: + "{% if stageName == nil %}no-stage{% else %}has-stage{% endif %}", + }, + issue: ISSUE_FIXTURE, + attempt: null, + }); + + expect(promptNull).toBe("no-stage"); + }); + it("uses the rendered workflow prompt for the first turn and continuation guidance after that", async () => { const first = await buildTurnPrompt({ workflow: { @@ -150,6 +175,104 @@ describe("prompt builder", () => { } satisfies Partial<PromptTemplateError>); }); + it("includes investigate constraints and STAGE_COMPLETE in continuation when stageName is investigate", () => { + const prompt = buildContinuationPrompt({ + issue: ISSUE_FIXTURE, + attempt: null, + turnNumber: 2, + maxTurns: 5, + stageName: "investigate", + }); + + expect(prompt).toContain("Current stage: investigate."); + expect(prompt).toContain("Do NOT implement code"); + expect(prompt).toContain("[STAGE_COMPLETE]"); + }); + + it("includes implement constraints and STAGE_COMPLETE in continuation when stageName is implement", () => { + const prompt = buildContinuationPrompt({ + issue: ISSUE_FIXTURE, + attempt: null, + turnNumber: 2, + maxTurns: 5, + stageName: "implement", + }); + + expect(prompt).toContain("Current stage: implement."); + expect(prompt).toContain("IMPLEMENT stage"); + expect(prompt).toContain("[STAGE_COMPLETE]"); + }); + + it("does not include STAGE_COMPLETE in continuation when stageName is null", () => { + const prompt = buildContinuationPrompt({ + issue: ISSUE_FIXTURE, + attempt: null, + turnNumber: 2, + maxTurns: 5, + stageName: 
null, + }); + + expect(prompt).not.toContain("[STAGE_COMPLETE]"); + expect(prompt).not.toContain("Current stage:"); + }); + + it("passes stageName through buildTurnPrompt to continuation on turn > 1", async () => { + const prompt = await buildTurnPrompt({ + workflow: { + promptTemplate: "Initial {{ issue.identifier }}", + }, + issue: ISSUE_FIXTURE, + attempt: null, + stageName: "investigate", + turnNumber: 2, + maxTurns: 4, + }); + + expect(prompt).toContain("Current stage: investigate."); + expect(prompt).toContain("Do NOT implement code"); + expect(prompt).toContain("[STAGE_COMPLETE]"); + }); + + it("makes reworkCount available in the template context, defaulting to 0", async () => { + const prompt = await renderPrompt({ + workflow: { + promptTemplate: "rework={{ reworkCount }}", + }, + issue: ISSUE_FIXTURE, + attempt: null, + }); + + expect(prompt).toBe("rework=0"); + }); + + it("renders reworkCount when explicitly provided", async () => { + const prompt = await renderPrompt({ + workflow: { + promptTemplate: + "{% if reworkCount > 0 %}rework attempt {{ reworkCount }}{% else %}first attempt{% endif %}", + }, + issue: ISSUE_FIXTURE, + attempt: null, + reworkCount: 3, + }); + + expect(prompt).toBe("rework attempt 3"); + }); + + it("renders reworkCount as 0 on first attempt", async () => { + const prompt = await renderPrompt({ + workflow: { + promptTemplate: + "{% if reworkCount > 0 %}rework attempt {{ reworkCount }}{% else %}first attempt{% endif %}", + }, + issue: ISSUE_FIXTURE, + attempt: null, + reworkCount: 0, + }); + + expect(prompt).toBe("first attempt"); + }); + it("reports invalid template syntax as a parse error", async () => { await expect( renderPrompt({ diff --git a/tests/agent/runner.test.ts b/tests/agent/runner.test.ts index 95c195d3..ac311537 100644 --- a/tests/agent/runner.test.ts +++ b/tests/agent/runner.test.ts @@ -154,6 +154,70 @@ describe("AgentRunner", () => { expect(prompts[1]).not.toContain("Initial prompt for ABC-123 attempt=2"); }); + 
it("emits promptChars and estimatedPromptTokens on agent events, with turn 1 larger than turn 2 for a long template", async () => { + const root = await createRoot(); + const prompts: string[] = []; + const capturedEvents: Array<{ + event: string; + promptChars: number | undefined; + estimatedPromptTokens: number | undefined; + turnCount: number; + }> = []; + const tracker = createTracker({ + refreshStates: [ + { id: "issue-1", identifier: "ABC-123", state: "In Progress" }, + { id: "issue-1", identifier: "ABC-123", state: "Human Review" }, + ], + }); + // Use a long template (>600 chars) so turn 1 prompt is larger than the continuation prompt + const longTemplate = + "You are an expert software engineer working on the following issue.\n\nIssue: {{ issue.identifier }}\nTitle: {{ issue.title }}\nDescription: {{ issue.description }}\nState: {{ issue.state }}\nAttempt: {{ attempt }}\n\nInstructions:\n- Read the issue description carefully.\n- Implement all required changes.\n- Write tests for any new functionality.\n- Run the full test suite and fix any failures.\n- Follow the existing code style and conventions.\n- Write clear commit messages.\n- Open a pull request when done.\n- Do not modify unrelated code.\n- Do not skip tests.\n- Document any architectural decisions.\n"; + const runner = new AgentRunner({ + config: { ...createConfig(root, "unused"), promptTemplate: longTemplate }, + tracker, + onEvent: (event) => { + capturedEvents.push({ + event: event.event, + promptChars: event.promptChars, + estimatedPromptTokens: event.estimatedPromptTokens, + turnCount: event.turnCount, + }); + }, + createCodexClient: (input) => + createStubCodexClient(prompts, input, { + statuses: ["completed", "completed"], + }), + }); + + await runner.run({ + issue: ISSUE_FIXTURE, + attempt: null, + }); + + expect(prompts).toHaveLength(2); + + // Events for turn 1 should carry turn 1 prompt metrics + const turn1Events = capturedEvents.filter((e) => e.turnCount === 1); + 
expect(turn1Events.length).toBeGreaterThan(0); + const turn1PromptChars = turn1Events[0]?.promptChars; + expect(turn1PromptChars).toBe(prompts[0]?.length); + expect(turn1Events[0]?.estimatedPromptTokens).toBe( + Math.ceil((turn1PromptChars ?? 0) / 4), + ); + + // Events for turn 2 should carry turn 2 prompt metrics + const turn2Events = capturedEvents.filter((e) => e.turnCount === 2); + expect(turn2Events.length).toBeGreaterThan(0); + const turn2PromptChars = turn2Events[0]?.promptChars; + expect(turn2PromptChars).toBe(prompts[1]?.length); + expect(turn2Events[0]?.estimatedPromptTokens).toBe( + Math.ceil((turn2PromptChars ?? 0) / 4), + ); + + // Turn 1 (full WORKFLOW template) should be larger than turn 2 (continuation) + expect(turn1PromptChars).toBeGreaterThan(turn2PromptChars ?? 0); + }); + it("fails immediately when before_run fails and still invokes after_run best-effort", async () => { const root = await createRoot(); const hooks = { @@ -293,6 +357,359 @@ describe("AgentRunner", () => { }); }); + it("removes existing workspace on fresh dispatch at initial stage", async () => { + const root = await createRoot(); + const workspacePath = join(root, "issue-1"); + const removeForIssue = vi.fn().mockResolvedValue(true); + const createForIssue = vi.fn().mockResolvedValue({ + path: workspacePath, + workspaceKey: "issue-1", + createdNow: true, + }); + const mockWorkspaceManager = { + root, + createForIssue, + removeForIssue, + resolveForIssue: vi.fn(), + }; + const config = createConfig(root, "unused"); + config.stages = { + initialStage: "investigate", + fastTrack: null, + stages: { + investigate: { + type: "agent", + runner: null, + model: null, + prompt: null, + maxTurns: 3, + timeoutMs: null, + concurrency: null, + gateType: null, + maxRework: null, + reviewers: [], + transitions: { onComplete: "done", onApprove: null, onRework: null }, + linearState: null, + }, + done: { + type: "terminal", + runner: null, + model: null, + prompt: null, + maxTurns: null, + 
timeoutMs: null, + concurrency: null, + gateType: null, + maxRework: null, + reviewers: [], + transitions: { onComplete: null, onApprove: null, onRework: null }, + linearState: null, + }, + }, + }; + const runner = new AgentRunner({ + config, + tracker: createTracker({ + refreshStates: [ + { id: "issue-1", identifier: "ABC-123", state: "Done" }, + ], + }), + workspaceManager: mockWorkspaceManager as never, + createCodexClient: (input) => + createStubCodexClient([], input, { + statuses: ["completed"], + }), + }); + + await runner.run({ + issue: ISSUE_FIXTURE, + attempt: null, + stageName: "investigate", + }); + + expect(removeForIssue).toHaveBeenCalledWith("issue-1"); + expect(createForIssue).toHaveBeenCalledWith("issue-1"); + }); + + it("does NOT remove workspace on flat dispatch (no stages)", async () => { + const root = await createRoot(); + const workspacePath = join(root, "issue-1"); + const removeForIssue = vi.fn().mockResolvedValue(true); + const createForIssue = vi.fn().mockResolvedValue({ + path: workspacePath, + workspaceKey: "issue-1", + createdNow: false, + }); + const mockWorkspaceManager = { + root, + createForIssue, + removeForIssue, + resolveForIssue: vi.fn(), + }; + const runner = new AgentRunner({ + config: createConfig(root, "unused"), + tracker: createTracker({ + refreshStates: [ + { id: "issue-1", identifier: "ABC-123", state: "Done" }, + ], + }), + workspaceManager: mockWorkspaceManager as never, + createCodexClient: (input) => + createStubCodexClient([], input, { + statuses: ["completed"], + }), + }); + + await runner.run({ + issue: ISSUE_FIXTURE, + attempt: null, + }); + + expect(removeForIssue).not.toHaveBeenCalled(); + expect(createForIssue).toHaveBeenCalledWith("issue-1"); + }); + + it("does NOT remove workspace on continuation (attempt !== null)", async () => { + const root = await createRoot(); + const workspacePath = join(root, "issue-1"); + const removeForIssue = vi.fn().mockResolvedValue(true); + const createForIssue = 
vi.fn().mockResolvedValue({ + path: workspacePath, + workspaceKey: "issue-1", + createdNow: false, + }); + const mockWorkspaceManager = { + root, + createForIssue, + removeForIssue, + resolveForIssue: vi.fn(), + }; + const runner = new AgentRunner({ + config: createConfig(root, "unused"), + tracker: createTracker({ + refreshStates: [ + { id: "issue-1", identifier: "ABC-123", state: "Done" }, + ], + }), + workspaceManager: mockWorkspaceManager as never, + createCodexClient: (input) => + createStubCodexClient([], input, { + statuses: ["completed"], + }), + }); + + await runner.run({ + issue: ISSUE_FIXTURE, + attempt: 1, + }); + + expect(removeForIssue).not.toHaveBeenCalled(); + expect(createForIssue).toHaveBeenCalledWith("issue-1"); + }); + + it("breaks the turn loop early when the agent emits [STAGE_COMPLETE]", async () => { + const root = await createRoot(); + const tracker = createTracker({ + refreshStates: [ + // Would keep going if not for early exit — issue stays active + { id: "issue-1", identifier: "ABC-123", state: "In Progress" }, + { id: "issue-1", identifier: "ABC-123", state: "In Progress" }, + ], + }); + const runner = new AgentRunner({ + config: createConfig(root, "unused"), + tracker, + createCodexClient: (input) => { + let turn = 0; + return { + async startSession({ prompt }: { prompt: string; title: string }) { + turn += 1; + input.onEvent({ + event: "session_started", + timestamp: new Date().toISOString(), + codexAppServerPid: "1001", + sessionId: `thread-1-turn-${turn}`, + threadId: "thread-1", + turnId: `turn-${turn}`, + }); + return { + status: "completed" as const, + threadId: "thread-1", + turnId: `turn-${turn}`, + sessionId: `thread-1-turn-${turn}`, + usage: null, + rateLimits: null, + message: "Done with investigation.\n[STAGE_COMPLETE]", + }; + }, + async continueTurn(prompt: string) { + turn += 1; + input.onEvent({ + event: "session_started", + timestamp: new Date().toISOString(), + codexAppServerPid: "1001", + sessionId: 
`thread-1-turn-${turn}`, + threadId: "thread-1", + turnId: `turn-${turn}`, + }); + return { + status: "completed" as const, + threadId: "thread-1", + turnId: `turn-${turn}`, + sessionId: `thread-1-turn-${turn}`, + usage: null, + rateLimits: null, + message: `turn ${turn}`, + }; + }, + close: vi.fn().mockResolvedValue(undefined), + }; + }, + }); + + const result = await runner.run({ + issue: ISSUE_FIXTURE, + attempt: null, + stageName: "investigate", + }); + + // maxTurns is 3, but should break after turn 1 due to [STAGE_COMPLETE] + expect(result.turnsCompleted).toBe(1); + expect(result.runAttempt.status).toBe("succeeded"); + // refreshIssueState should NOT have been called since we broke before it + expect(tracker.fetchIssueStatesByIds).not.toHaveBeenCalled(); + }); + + it("breaks the turn loop early when the agent emits [STAGE_FAILED: ...]", async () => { + const root = await createRoot(); + const tracker = createTracker({ + refreshStates: [ + { id: "issue-1", identifier: "ABC-123", state: "In Progress" }, + { id: "issue-1", identifier: "ABC-123", state: "In Progress" }, + ], + }); + const runner = new AgentRunner({ + config: createConfig(root, "unused"), + tracker, + createCodexClient: (input) => { + let turn = 0; + return { + async startSession({ prompt }: { prompt: string; title: string }) { + turn += 1; + input.onEvent({ + event: "session_started", + timestamp: new Date().toISOString(), + codexAppServerPid: "1001", + sessionId: `thread-1-turn-${turn}`, + threadId: "thread-1", + turnId: `turn-${turn}`, + }); + return { + status: "completed" as const, + threadId: "thread-1", + turnId: `turn-${turn}`, + sessionId: `thread-1-turn-${turn}`, + usage: null, + rateLimits: null, + message: "Tests failed.\n[STAGE_FAILED: verify]\nSee logs.", + }; + }, + async continueTurn(prompt: string) { + turn += 1; + input.onEvent({ + event: "session_started", + timestamp: new Date().toISOString(), + codexAppServerPid: "1001", + sessionId: `thread-1-turn-${turn}`, + threadId: 
"thread-1", + turnId: `turn-${turn}`, + }); + return { + status: "completed" as const, + threadId: "thread-1", + turnId: `turn-${turn}`, + sessionId: `thread-1-turn-${turn}`, + usage: null, + rateLimits: null, + message: `turn ${turn}`, + }; + }, + close: vi.fn().mockResolvedValue(undefined), + }; + }, + }); + + const result = await runner.run({ + issue: ISSUE_FIXTURE, + attempt: null, + stageName: "implement", + }); + + // maxTurns is 3, but should break after turn 1 due to [STAGE_FAILED: verify] + expect(result.turnsCompleted).toBe(1); + expect(result.lastTurn?.message).toContain("[STAGE_FAILED: verify]"); + }); + + it("throws AgentRunnerError when a turn fails without a STAGE_FAILED signal", async () => { + const root = await createRoot(); + const tracker = createTracker({ + refreshStates: [ + { id: "issue-1", identifier: "ABC-123", state: "In Progress" }, + ], + }); + const runner = new AgentRunner({ + config: createConfig(root, "unused"), + tracker, + createCodexClient: (input) => + createStubCodexClient([], input, { + statuses: ["failed"], + messages: ["The operation was aborted"], + }), + }); + + await expect( + runner.run({ + issue: ISSUE_FIXTURE, + attempt: null, + }), + ).rejects.toMatchObject({ + name: "AgentRunnerError", + status: "failed", + failedPhase: "initializing_session", + message: "The operation was aborted", + } satisfies Partial<AgentRunnerError>); + + // Should NOT have called refreshIssueState since we threw before it + expect(tracker.fetchIssueStatesByIds).not.toHaveBeenCalled(); + }); + + it("returns succeeded when infrastructure marks turn failed but agent emitted STAGE_FAILED signal", async () => { + const root = await createRoot(); + const tracker = createTracker({ + refreshStates: [ + { id: "issue-1", identifier: "ABC-123", state: "In Progress" }, + ], + }); + const runner = new AgentRunner({ + config: createConfig(root, "unused"), + tracker, + createCodexClient: (input) => + createStubCodexClient([], input, { + statuses: ["failed"], 
+ messages: ["Tests failed.\n[STAGE_FAILED: verify]\nSee logs."], + }), + }); + + const result = await runner.run({ + issue: ISSUE_FIXTURE, + attempt: null, + }); + + // STAGE_FAILED is an intentional agent signal — runner should succeed + expect(result.runAttempt.status).toBe("succeeded"); + expect(result.lastTurn?.message).toContain("[STAGE_FAILED: verify]"); + }); + it("cancels the run when the orchestrator aborts the worker signal", async () => { const root = await createRoot(); const close = vi.fn().mockResolvedValue(undefined); @@ -360,6 +777,7 @@ function createStubCodexClient( overrides?: Partial<{ close: ReturnType<typeof vi.fn>; statuses: Array<"completed" | "failed" | "cancelled">; + messages: Array<string | null>; startSession: (input: { prompt: string; title: string }) => Promise<{ status: "completed" | "failed" | "cancelled"; threadId: string; @@ -377,6 +795,7 @@ function createStubCodexClient( ) { let turn = 0; const statuses = overrides?.statuses ?? ["completed"]; + const messages = overrides?.messages; return { async startSession({ prompt, title }: { prompt: string; title: string }) { @@ -407,7 +826,9 @@ function createStubCodexClient( rateLimits: { requestsRemaining: 10 - turn, }, - message: `turn ${turn}`, + message: messages + ? (messages[turn - 1] ?? `turn ${turn}`) + : `turn ${turn}`, }; }, async continueTurn(prompt: string) { @@ -434,7 +855,9 @@ function createStubCodexClient( rateLimits: { requestsRemaining: 10 - turn, }, - message: `turn ${turn}`, + message: messages + ? (messages[turn - 1] ?? `turn ${turn}`) + : `turn ${turn}`, }; }, close: overrides?.close ?? 
vi.fn().mockResolvedValue(undefined), @@ -486,6 +909,7 @@ function createConfig(root: string, scenario: string): ResolvedWorkflowConfig { maxConcurrentAgents: 2, maxTurns: 3, maxRetryBackoffMs: 300_000, + maxRetryAttempts: 5, maxConcurrentAgentsByState: {}, }, codex: { @@ -501,12 +925,19 @@ function createConfig(root: string, scenario: string): ResolvedWorkflowConfig { }, server: { port: null, + slackNotifyChannel: null, }, observability: { dashboardEnabled: true, refreshMs: 1_000, renderIntervalMs: 16, }, + runner: { + kind: "codex", + model: null, + }, + stages: null, + escalationState: null, }; } diff --git a/tests/chunking.test.ts b/tests/chunking.test.ts new file mode 100644 index 00000000..638c645a --- /dev/null +++ b/tests/chunking.test.ts @@ -0,0 +1,108 @@ +import { describe, expect, it } from "vitest"; + +import { SLACK_MAX_CHARS, chunkResponse } from "../src/chunking.js"; + +describe("chunkResponse", () => { + it("returns a single chunk for text under the limit", () => { + const text = "Short response"; + const chunks = chunkResponse(text); + expect(chunks).toEqual(["Short response"]); + }); + + it("splits an 80K char response into 3 messages, each under 39K chars", () => { + // Build an 80,000 char response from paragraphs, each ~1,000 chars + const paragraphSize = 1000; + const paragraphCount = 80; + const paragraphs: string[] = []; + for (let i = 0; i < paragraphCount; i++) { + paragraphs.push( + `Paragraph ${i + 1}: ${"x".repeat(paragraphSize - `Paragraph ${i + 1}: `.length)}`, + ); + } + const fullText = paragraphs.join("\n\n"); + expect(fullText.length).toBeGreaterThanOrEqual(80_000); + + const chunks = chunkResponse(fullText); + + // Each chunk must be under 39K chars + for (const chunk of chunks) { + expect(chunk.length).toBeLessThanOrEqual(SLACK_MAX_CHARS); + } + + // 80K split into 39K chunks → expect 3 chunks + expect(chunks).toHaveLength(3); + }); + + it("splits at paragraph boundaries when possible", () => { + // Create two paragraphs that 
together exceed the limit + const halfLimit = Math.floor(SLACK_MAX_CHARS / 2); + const paragraph1 = "A".repeat(halfLimit); + const paragraph2 = "B".repeat(halfLimit); + const paragraph3 = "C".repeat(halfLimit); + const text = `${paragraph1}\n\n${paragraph2}\n\n${paragraph3}`; + + const chunks = chunkResponse(text); + + // Should split at paragraph boundaries, not mid-text + expect(chunks.length).toBeGreaterThanOrEqual(2); + for (const chunk of chunks) { + expect(chunk.length).toBeLessThanOrEqual(SLACK_MAX_CHARS); + } + + // Verify content is preserved (join with paragraph separator) + const rejoined = chunks.join("\n\n"); + expect(rejoined).toBe(text); + }); + + it("hard-splits a single paragraph exceeding the limit", () => { + const oversizedParagraph = "Z".repeat(SLACK_MAX_CHARS + 5000); + const chunks = chunkResponse(oversizedParagraph); + + expect(chunks.length).toBe(2); + for (const chunk of chunks) { + expect(chunk.length).toBeLessThanOrEqual(SLACK_MAX_CHARS); + } + + // Content is preserved + expect(chunks.join("")).toBe(oversizedParagraph); + }); + + it("posts all chunks to the same thread (all chunks returned in order)", () => { + // This tests that chunkResponse returns an ordered array + // The caller (handler) posts each chunk to thread.post() sequentially + const paragraphs: string[] = []; + for (let i = 0; i < 50; i++) { + paragraphs.push(`Section ${i + 1}: ${"x".repeat(1400)}`); + } + const text = paragraphs.join("\n\n"); + + const chunks = chunkResponse(text); + + // Verify ordering: reassembling chunks should give back the original text + const reassembled = chunks.join("\n\n"); + expect(reassembled).toBe(text); + + // All chunks should be under the limit + for (const chunk of chunks) { + expect(chunk.length).toBeLessThanOrEqual(SLACK_MAX_CHARS); + } + + // Multiple chunks required for this large text + expect(chunks.length).toBeGreaterThan(1); + }); + + it("handles text with only whitespace paragraphs", () => { + const text = "Hello\n\n 
\n\n\n\nWorld"; + const chunks = chunkResponse(text); + // Should filter empty paragraphs but since total is small, single chunk + expect(chunks).toHaveLength(1); + }); + + it("uses custom maxChars when provided", () => { + const text = `${"A".repeat(100)}\n\n${"B".repeat(100)}`; + const chunks = chunkResponse(text, 150); + expect(chunks).toHaveLength(2); + expect(chunks[0]).toBe("A".repeat(100)); + expect(chunks[1]).toBe("B".repeat(100)); + }); +}); diff --git a/tests/cli/global-error-handlers.test.ts b/tests/cli/global-error-handlers.test.ts new file mode 100644 index 00000000..c86abfa0 --- /dev/null +++ b/tests/cli/global-error-handlers.test.ts @@ -0,0 +1,125 @@ +import type { MockInstance } from "vitest"; +import { afterEach, beforeEach, describe, expect, it, vi } from "vitest"; + +const mockWriteSync = vi.hoisted(() => vi.fn()); + +vi.mock("node:fs", async (importOriginal) => { + const actual = await importOriginal<typeof import("node:fs")>(); + return { + ...actual, + writeSync: mockWriteSync, + }; +}); + +import { + handleUncaughtException, + handleUnhandledRejection, +} from "../../src/cli/main.js"; + +describe("global error handlers", () => { + let exitSpy: MockInstance; + + beforeEach(() => { + mockWriteSync.mockReturnValue(0); + exitSpy = vi + .spyOn(process, "exit") + .mockImplementation(() => undefined as never); + }); + + afterEach(() => { + process.exitCode = undefined; + mockWriteSync.mockClear(); + vi.restoreAllMocks(); + }); + + it("handleUncaughtException logs structured JSON and exits with code 70", () => { + const error = new Error("kaboom"); + + handleUncaughtException(error); + + expect(mockWriteSync).toHaveBeenCalledOnce(); + const written = mockWriteSync.mock.calls[0]![1] as string; + const entry = JSON.parse(written.trimEnd()); + + expect(entry.level).toBe("error"); + expect(entry.event).toBe("process_crash"); + expect(entry.error_code).toBe("uncaught_exception"); + expect(entry.message).toBe("kaboom"); + 
expect(entry.stack).toContain("kaboom"); + expect(entry.timestamp).toBeDefined(); + expect(process.exitCode).toBe(70); + expect(exitSpy).toHaveBeenCalledWith(70); + }); + + it("handleUncaughtException handles non-Error values", () => { + handleUncaughtException("string rejection"); + + const written = mockWriteSync.mock.calls[0]![1] as string; + const entry = JSON.parse(written.trimEnd()); + + expect(entry.message).toBe("string rejection"); + expect(entry.stack).toBeUndefined(); + expect(entry.error_code).toBe("uncaught_exception"); + }); + + it("handleUncaughtException handles non-stringifiable values", () => { + const obj = Object.create(null); + obj.toString = () => { + throw new Error("toString threw"); + }; + + handleUncaughtException(obj); + + const written = mockWriteSync.mock.calls[0]![1] as string; + const entry = JSON.parse(written.trimEnd()); + + expect(entry.message).toBe("[non-stringifiable value]"); + expect(entry.stack).toBeUndefined(); + expect(entry.error_code).toBe("uncaught_exception"); + }); + + it("handleUnhandledRejection logs structured JSON and exits with code 70", () => { + const reason = new Error("promise failed"); + + handleUnhandledRejection(reason); + + expect(mockWriteSync).toHaveBeenCalledOnce(); + const written = mockWriteSync.mock.calls[0]![1] as string; + const entry = JSON.parse(written.trimEnd()); + + expect(entry.level).toBe("error"); + expect(entry.event).toBe("process_crash"); + expect(entry.error_code).toBe("unhandled_rejection"); + expect(entry.message).toBe("promise failed"); + expect(entry.stack).toContain("promise failed"); + expect(process.exitCode).toBe(70); + expect(exitSpy).toHaveBeenCalledWith(70); + }); + + it("handleUnhandledRejection handles non-Error values", () => { + handleUnhandledRejection(42); + + const written = mockWriteSync.mock.calls[0]![1] as string; + const entry = JSON.parse(written.trimEnd()); + + expect(entry.message).toBe("42"); + expect(entry.stack).toBeUndefined(); + 
expect(entry.error_code).toBe("unhandled_rejection"); + }); + + it("handleUnhandledRejection handles non-stringifiable values", () => { + const obj = Object.create(null); + obj.toString = () => { + throw new Error("toString threw"); + }; + + handleUnhandledRejection(obj); + + const written = mockWriteSync.mock.calls[0]![1] as string; + const entry = JSON.parse(written.trimEnd()); + + expect(entry.message).toBe("[non-stringifiable value]"); + expect(entry.stack).toBeUndefined(); + expect(entry.error_code).toBe("unhandled_rejection"); + }); +}); diff --git a/tests/cli/main.test.ts b/tests/cli/main.test.ts index cb134a22..29faf20b 100644 --- a/tests/cli/main.test.ts +++ b/tests/cli/main.test.ts @@ -7,6 +7,7 @@ import { describe, expect, it, vi } from "vitest"; import { CLI_ACKNOWLEDGEMENT_FLAG, + type StartCliHostInput, applyCliOverrides, parseCliArgs, runCli, @@ -30,6 +31,7 @@ describe("cli", () => { port: 8080, acknowledged: true, help: false, + version: false, }); }); @@ -47,6 +49,7 @@ describe("cli", () => { createConfig({ server: { port: 3000, + slackNotifyChannel: null, }, }), { @@ -55,6 +58,7 @@ describe("cli", () => { port: 8080, acknowledged: true, help: false, + version: false, }, "/repo", ); @@ -216,6 +220,51 @@ describe("cli", () => { "Symphony host exited abnormally with code 3.\n", ); }); + + it("prints version and exits 0 when --version is passed", async () => { + const stdout = vi.fn(); + const exitCode = await runCli(["--version"], { + io: { stdout, stderr: vi.fn() }, + }); + expect(exitCode).toBe(0); + expect(stdout).toHaveBeenCalledWith( + expect.stringMatching(/^symphony-ts .+\n$/), + ); + }); + + it("env with SLACK_BOT_TOKEN is forwarded to startHost", async () => { + const capturedInput: StartCliHostInput[] = []; + const startHost = vi.fn(async (input: StartCliHostInput) => { + capturedInput.push(input); + return { + async waitForExit() { + return 0; + }, + }; + }); + + const injectedEnv = { SLACK_BOT_TOKEN: "xoxb-injected-token" }; + + await 
runCli([CLI_ACKNOWLEDGEMENT_FLAG], { + env: injectedEnv, + loadWorkflowDefinition: vi.fn(async () => ({ + workflowPath: "/repo/WORKFLOW.md", + config: {}, + promptTemplate: "Prompt", + })), + startHost, + }); + + expect(startHost).toHaveBeenCalledOnce(); + expect(capturedInput[0]?.env).toBe(injectedEnv); + expect(capturedInput[0]?.env.SLACK_BOT_TOKEN).toBe("xoxb-injected-token"); + }); + + it("parses --version flag", () => { + expect(parseCliArgs(["--version"])).toEqual( + expect.objectContaining({ version: true }), + ); + }); }); function createConfig( @@ -249,6 +298,7 @@ function createConfig( maxConcurrentAgents: 10, maxTurns: 20, maxRetryBackoffMs: 300_000, + maxRetryAttempts: 5, maxConcurrentAgentsByState: {}, }, codex: { @@ -262,12 +312,19 @@ function createConfig( }, server: { port: null, + slackNotifyChannel: null, }, observability: { dashboardEnabled: true, refreshMs: 1_000, renderIntervalMs: 16, }, + runner: { + kind: "codex", + model: null, + }, + stages: null, + escalationState: null, ...overrides, }; } diff --git a/tests/cli/runtime-integration.test.ts b/tests/cli/runtime-integration.test.ts index 80d5738b..fd629a13 100644 --- a/tests/cli/runtime-integration.test.ts +++ b/tests/cli/runtime-integration.test.ts @@ -68,6 +68,7 @@ describe("runtime integration", () => { }, server: { port: 0, + slackNotifyChannel: null, }, }), logsRoot, @@ -98,6 +99,7 @@ describe("runtime integration", () => { const logFile = await readFile(join(logsRoot, "symphony.jsonl"), "utf8"); expect(logFile).toContain('"event":"runtime_starting"'); + expect(logFile).toContain('"symphony_version"'); expect(tracker.fetchIssuesByStates).toHaveBeenCalledWith([ "Done", "Canceled", @@ -578,6 +580,7 @@ function createConfig( maxConcurrentAgents: 10, maxTurns: 20, maxRetryBackoffMs: 300_000, + maxRetryAttempts: 5, maxConcurrentAgentsByState: {}, }, codex: { @@ -591,12 +594,19 @@ function createConfig( }, server: { port: null, + slackNotifyChannel: null, }, observability: { dashboardEnabled: 
true, refreshMs: 1_000, renderIntervalMs: 16, }, + runner: { + kind: "codex", + model: null, + }, + stages: null, + escalationState: null, ...overrides, }; } diff --git a/tests/codex/app-server-client.test.ts b/tests/codex/app-server-client.test.ts index ea0485a2..cc556aac 100644 --- a/tests/codex/app-server-client.test.ts +++ b/tests/codex/app-server-client.test.ts @@ -47,6 +47,8 @@ describe("CodexAppServerClient", () => { inputTokens: 14, outputTokens: 9, totalTokens: 23, + cacheReadTokens: 4, + reasoningTokens: 2, }, rateLimits: { requestsRemaining: 10, @@ -156,7 +158,8 @@ describe("CodexAppServerClient", () => { usage: { inputTokens: 20, outputTokens: 10, - totalTokens: 30, + totalTokens: 30, // computed from input + output when total_tokens absent + cacheReadTokens: 5, // extracted from cache_read_input_tokens }, rateLimits: { requests_remaining: 9, diff --git a/tests/codex/workpad-sync-tool.test.ts b/tests/codex/workpad-sync-tool.test.ts new file mode 100644 index 00000000..cfca55b3 --- /dev/null +++ b/tests/codex/workpad-sync-tool.test.ts @@ -0,0 +1,394 @@ +import { mkdtemp, rm, writeFile } from "node:fs/promises"; +import { tmpdir } from "node:os"; +import { join } from "node:path"; +import { afterEach, beforeEach, describe, expect, it, vi } from "vitest"; + +import { createWorkpadSyncDynamicTool } from "../../src/index.js"; + +describe("createWorkpadSyncDynamicTool", () => { + let tempDir: string; + let workpadPath: string; + + beforeEach(async () => { + tempDir = await mkdtemp(join(tmpdir(), "workpad-sync-test-")); + workpadPath = join(tempDir, "workpad.md"); + await writeFile(workpadPath, "# Workpad\n\n## Status\nIn progress."); + }); + + afterEach(async () => { + await rm(tempDir, { recursive: true, force: true }); + }); + + it("creates a new comment and returns the comment_id", async () => { + const fetchFn = vi.fn<typeof fetch>().mockResolvedValue( + jsonResponse({ + data: { + commentCreate: { + success: true, + comment: { id: "comment-abc-123" }, + 
}, + }, + }), + ); + const tool = createWorkpadSyncDynamicTool({ + apiKey: "linear-token", + fetchFn, + }); + + const result = await tool.execute({ + issue_id: "issue-1", + file_path: workpadPath, + }); + + expect(result).toEqual({ + success: true, + comment_id: "comment-abc-123", + }); + + expect(fetchFn).toHaveBeenCalledOnce(); + const [url, init] = fetchFn.mock.calls[0]!; + expect(url).toBe("https://api.linear.app/graphql"); + expect(init?.method).toBe("POST"); + const body = JSON.parse(init?.body as string); + expect(body.variables.issueId).toBe("issue-1"); + expect(body.variables.body).toBe("# Workpad\n\n## Status\nIn progress."); + expect(body.query).toContain("commentCreate"); + }); + + it("updates an existing comment when comment_id is provided", async () => { + const fetchFn = vi.fn<typeof fetch>().mockResolvedValue( + jsonResponse({ + data: { + commentUpdate: { + success: true, + }, + }, + }), + ); + const tool = createWorkpadSyncDynamicTool({ + apiKey: "linear-token", + fetchFn, + }); + + const result = await tool.execute({ + issue_id: "issue-1", + file_path: workpadPath, + comment_id: "comment-existing-456", + }); + + expect(result).toEqual({ + success: true, + comment_id: "comment-existing-456", + }); + + expect(fetchFn).toHaveBeenCalledOnce(); + const body = JSON.parse(fetchFn.mock.calls[0]![1]?.body as string); + expect(body.variables.commentId).toBe("comment-existing-456"); + expect(body.query).toContain("commentUpdate"); + }); + + it("returns file_read_error when file does not exist", async () => { + const tool = createWorkpadSyncDynamicTool({ + apiKey: "linear-token", + fetchFn: vi.fn<typeof fetch>(), + }); + + const result = await tool.execute({ + issue_id: "issue-1", + file_path: "/nonexistent/workpad.md", + }); + + expect(result).toMatchObject({ + success: false, + error: { + code: "file_read_error", + }, + }); + }); + + it("rejects missing issue_id", async () => { + const tool = createWorkpadSyncDynamicTool({ + apiKey: "linear-token", + 
fetchFn: vi.fn<typeof fetch>(), + }); + + const result = await tool.execute({ + file_path: workpadPath, + }); + + expect(result).toMatchObject({ + success: false, + error: { + code: "invalid_input", + message: "sync_workpad.issue_id must be a non-empty string.", + }, + }); + }); + + it("rejects missing file_path", async () => { + const tool = createWorkpadSyncDynamicTool({ + apiKey: "linear-token", + fetchFn: vi.fn<typeof fetch>(), + }); + + const result = await tool.execute({ + issue_id: "issue-1", + }); + + expect(result).toMatchObject({ + success: false, + error: { + code: "invalid_input", + message: "sync_workpad.file_path must be a non-empty string.", + }, + }); + }); + + it("rejects non-object input", async () => { + const tool = createWorkpadSyncDynamicTool({ + apiKey: "linear-token", + fetchFn: vi.fn<typeof fetch>(), + }); + + const result = await tool.execute("just a string"); + + expect(result).toMatchObject({ + success: false, + error: { + code: "invalid_input", + message: "sync_workpad expects an object with issue_id and file_path.", + }, + }); + }); + + it("rejects non-string comment_id", async () => { + const tool = createWorkpadSyncDynamicTool({ + apiKey: "linear-token", + fetchFn: vi.fn<typeof fetch>(), + }); + + const result = await tool.execute({ + issue_id: "issue-1", + file_path: workpadPath, + comment_id: 123, + }); + + expect(result).toMatchObject({ + success: false, + error: { + code: "invalid_input", + message: "sync_workpad.comment_id must be a string if provided.", + }, + }); + }); + + it("returns error when Linear API returns HTTP error", async () => { + const fetchFn = vi + .fn<typeof fetch>() + .mockResolvedValue( + new Response("Internal Server Error", { status: 500 }), + ); + const tool = createWorkpadSyncDynamicTool({ + apiKey: "linear-token", + fetchFn, + }); + + const result = await tool.execute({ + issue_id: "issue-1", + file_path: workpadPath, + }); + + expect(result).toMatchObject({ + success: false, + error: { + code: 
"linear_api_request", + message: "Linear API returned HTTP 500.", + }, + }); + }); + + it("returns error when Linear API returns GraphQL errors", async () => { + const fetchFn = vi.fn<typeof fetch>().mockResolvedValue( + jsonResponse({ + data: null, + errors: [{ message: "forbidden" }], + }), + ); + const tool = createWorkpadSyncDynamicTool({ + apiKey: "linear-token", + fetchFn, + }); + + const result = await tool.execute({ + issue_id: "issue-1", + file_path: workpadPath, + }); + + expect(result).toMatchObject({ + success: false, + error: { + code: "linear_api_request", + }, + }); + }); + + it("returns error when commentCreate returns no comment id", async () => { + const fetchFn = vi.fn<typeof fetch>().mockResolvedValue( + jsonResponse({ + data: { + commentCreate: { + success: false, + }, + }, + }), + ); + const tool = createWorkpadSyncDynamicTool({ + apiKey: "linear-token", + fetchFn, + }); + + const result = await tool.execute({ + issue_id: "issue-1", + file_path: workpadPath, + }); + + expect(result).toMatchObject({ + success: false, + error: { + code: "linear_response_malformed", + }, + }); + }); + + it("returns error when fetch itself throws (network failure)", async () => { + const fetchFn = vi + .fn<typeof fetch>() + .mockRejectedValue(new Error("network down")); + const tool = createWorkpadSyncDynamicTool({ + apiKey: "linear-token", + fetchFn, + }); + + const result = await tool.execute({ + issue_id: "issue-1", + file_path: workpadPath, + }); + + expect(result).toMatchObject({ + success: false, + error: { + code: "linear_api_request", + message: "network down", + }, + }); + }); + + it("uses custom endpoint when provided", async () => { + const fetchFn = vi.fn<typeof fetch>().mockResolvedValue( + jsonResponse({ + data: { + commentCreate: { + success: true, + comment: { id: "comment-999" }, + }, + }, + }), + ); + const tool = createWorkpadSyncDynamicTool({ + apiKey: "linear-token", + endpoint: "https://custom.linear.dev/graphql", + fetchFn, + }); + + await 
tool.execute({ + issue_id: "issue-1", + file_path: workpadPath, + }); + + expect(fetchFn.mock.calls[0]![0]).toBe("https://custom.linear.dev/graphql"); + }); + + it("returns error when commentCreate has no comment field", async () => { + const fetchFn = vi.fn<typeof fetch>().mockResolvedValue( + jsonResponse({ + data: { + commentCreate: { + success: true, + // no comment field + }, + }, + }), + ); + const tool = createWorkpadSyncDynamicTool({ + apiKey: "linear-token", + fetchFn, + }); + const result = await tool.execute({ + issue_id: "issue-1", + file_path: workpadPath, + }); + expect(result).toMatchObject({ + success: false, + error: { code: "linear_response_malformed" }, + }); + }); + + it("returns error when commentCreate returns empty comment id", async () => { + const fetchFn = vi.fn<typeof fetch>().mockResolvedValue( + jsonResponse({ + data: { + commentCreate: { + success: true, + comment: { id: "" }, + }, + }, + }), + ); + const tool = createWorkpadSyncDynamicTool({ + apiKey: "linear-token", + fetchFn, + }); + const result = await tool.execute({ + issue_id: "issue-1", + file_path: workpadPath, + }); + expect(result).toMatchObject({ + success: false, + error: { code: "linear_response_malformed" }, + }); + }); + + it("returns error when commentUpdate returns success false", async () => { + const fetchFn = vi.fn<typeof fetch>().mockResolvedValue( + jsonResponse({ + data: { + commentUpdate: { + success: false, + }, + }, + }), + ); + const tool = createWorkpadSyncDynamicTool({ + apiKey: "linear-token", + fetchFn, + }); + const result = await tool.execute({ + issue_id: "issue-1", + file_path: workpadPath, + comment_id: "existing-comment-id", + }); + expect(result).toMatchObject({ + success: false, + error: { code: "linear_response_malformed" }, + }); + }); +}); + +function jsonResponse(body: unknown, status = 200): Response { + return new Response(JSON.stringify(body), { + status, + headers: { + "content-type": "application/json", + }, + }); +} diff --git 
a/tests/config.test.ts b/tests/config.test.ts new file mode 100644 index 00000000..6bbe0320 --- /dev/null +++ b/tests/config.test.ts @@ -0,0 +1,213 @@ +import { afterEach, describe, expect, it, vi } from "vitest"; + +// Mock the AI SDK modules before importing handler +vi.mock("ai", () => ({ + streamText: vi.fn(), +})); + +vi.mock("ai-sdk-provider-claude-code", () => ({ + claudeCode: vi.fn(), +})); + +import { streamText } from "ai"; +import { claudeCode } from "ai-sdk-provider-claude-code"; + +import type { BoltMessageArgs } from "../src/slack-bot/handler.js"; +import { createMessageHandler } from "../src/slack-bot/handler.js"; +import { createCcSessionStore } from "../src/slack-bot/session-store.js"; +import { parseSlashCommand } from "../src/slack-bot/slash-commands.js"; +import type { ChannelProjectMap, SessionMap } from "../src/slack-bot/types.js"; + +/** Create a mock Bolt message args object. */ +function createMockBoltArgs( + channelId: string, + text: string, +): { + args: BoltMessageArgs; + say: ReturnType<typeof vi.fn>; + client: { + reactions: { + add: ReturnType<typeof vi.fn>; + remove: ReturnType<typeof vi.fn>; + }; + }; +} { + const say = vi.fn().mockResolvedValue(undefined); + const client = { + reactions: { + add: vi.fn().mockResolvedValue(undefined), + remove: vi.fn().mockResolvedValue(undefined), + }, + }; + + const message = { + type: "message" as const, + text, + ts: "1234.5678", + channel: channelId, + }; + + const args = { + message, + say, + client, + context: {}, + logger: { debug: vi.fn(), info: vi.fn(), warn: vi.fn(), error: vi.fn() }, + next: vi.fn(), + event: message, + payload: message, + body: { event: message }, + } as unknown as BoltMessageArgs; + + return { args, say, client }; +} + +// Helper to create an async iterable from strings +async function* createAsyncIterable(chunks: string[]): AsyncIterable<string> { + for (const chunk of chunks) { + yield chunk; + } +} + +// Helper to create a mock streamText return value with response 
promise +function createMockStreamResult(chunks: string[], sessionId?: string) { + const messages = sessionId + ? [{ providerMetadata: { "claude-code": { sessionId } } }] + : []; + return { + textStream: createAsyncIterable(chunks), + response: Promise.resolve({ messages }), + } as unknown as ReturnType<typeof streamText>; +} + +describe("parseSlashCommand", () => { + it("parses /project set with a path", () => { + const result = parseSlashCommand("/project set ~/projects/jony"); + expect(result).toEqual({ + type: "project-set", + path: "~/projects/jony", + }); + }); + + it("parses /project set with absolute path", () => { + const result = parseSlashCommand("/project set /home/user/myapp"); + expect(result).toEqual({ + type: "project-set", + path: "/home/user/myapp", + }); + }); + + it("trims whitespace from the command", () => { + const result = parseSlashCommand(" /project set ~/projects/jony "); + expect(result).toEqual({ + type: "project-set", + path: "~/projects/jony", + }); + }); + + it("returns null for non-slash-command messages", () => { + expect(parseSlashCommand("Hello, how are you?")).toBeNull(); + }); + + it("returns null for unknown slash commands", () => { + expect(parseSlashCommand("/unknown command")).toBeNull(); + }); + + it("returns null for /project without set subcommand", () => { + expect(parseSlashCommand("/project")).toBeNull(); + }); + + it("returns null for /project set without a path", () => { + expect(parseSlashCommand("/project set")).toBeNull(); + }); +}); + +describe("Channel-to-project mapping via slash command", () => { + afterEach(() => { + vi.restoreAllMocks(); + }); + + it("updates channelMap when /project set is used", async () => { + const channelMap: ChannelProjectMap = new Map(); + const sessions: SessionMap = new Map(); + const ccSessions = createCcSessionStore(); + + const handler = createMessageHandler({ + channelMap, + sessions, + ccSessions, + }); + + const { args, say, client } = createMockBoltArgs( + "C456", + 
"/project set ~/projects/jony", + ); + await handler(args); + + // Verify channelMap was updated + expect(channelMap.get("C456")).toBe("~/projects/jony"); + + // Verify confirmation message was posted + expect(say).toHaveBeenCalledWith( + expect.objectContaining({ + text: expect.stringContaining("~/projects/jony"), + }), + ); + + // Verify Claude Code was NOT invoked for the slash command + expect(streamText).not.toHaveBeenCalled(); + expect(claudeCode).not.toHaveBeenCalled(); + + // Verify no reaction was added (slash commands skip reaction flow) + expect(client.reactions.add).not.toHaveBeenCalled(); + }); + + it("uses updated project dir for subsequent messages in the channel", async () => { + const channelMap: ChannelProjectMap = new Map(); + const sessions: SessionMap = new Map(); + const ccSessions = createCcSessionStore(); + const mockModel = { id: "mock-claude-code-model" }; + + vi.mocked(claudeCode).mockReturnValue( + mockModel as unknown as ReturnType<typeof claudeCode>, + ); + vi.mocked(streamText).mockReturnValue(createMockStreamResult(["Done"])); + + const handler = createMessageHandler({ + channelMap, + sessions, + ccSessions, + }); + + // First: set the project via slash command + const setArgs = createMockBoltArgs("C456", "/project set ~/projects/jony"); + await handler(setArgs.args); + + // Then: send a regular message in the same channel + const regularArgs = createMockBoltArgs("C456", "What files are here?"); + await handler(regularArgs.args); + + // Verify claudeCode was called with the new project dir + expect(claudeCode).toHaveBeenCalledWith( + expect.any(String), + expect.objectContaining({ cwd: "~/projects/jony" }), + ); + }); + + it("overwrites existing channel mapping with /project set", async () => { + const channelMap: ChannelProjectMap = new Map([["C456", "/old/project"]]); + const sessions: SessionMap = new Map(); + const ccSessions = createCcSessionStore(); + + const handler = createMessageHandler({ + channelMap, + sessions, + 
ccSessions, + }); + + const { args } = createMockBoltArgs("C456", "/project set /new/project"); + await handler(args); + + expect(channelMap.get("C456")).toBe("/new/project"); + }); +}); diff --git a/tests/config/config-resolver.test.ts b/tests/config/config-resolver.test.ts index de544340..5cbb899d 100644 --- a/tests/config/config-resolver.test.ts +++ b/tests/config/config-resolver.test.ts @@ -4,8 +4,10 @@ import { join } from "node:path"; import { describe, expect, it } from "vitest"; import { + resolveStagesConfig, resolveWorkflowConfig, validateDispatchConfig, + validateStagesConfig, } from "../../src/config/config-resolver.js"; import { DEFAULT_CODEX_COMMAND, @@ -63,6 +65,7 @@ describe("config-resolver", () => { expect(resolved.observability.renderIntervalMs).toBe( DEFAULT_OBSERVABILITY_RENDER_INTERVAL_MS, ); + expect(resolved.server.slackNotifyChannel).toBeNull(); }); it("coerces env-backed fields, path-like roots, and state limits", () => { @@ -221,6 +224,28 @@ describe("config-resolver", () => { ); }); + it("parses escalation_state from top-level config", () => { + const resolved = resolveWorkflowConfig({ + workflowPath: "/repo/WORKFLOW.md", + promptTemplate: "Prompt", + config: { + escalation_state: "Needs Triage", + }, + }); + + expect(resolved.escalationState).toBe("Needs Triage"); + }); + + it("defaults escalationState to null when not specified", () => { + const resolved = resolveWorkflowConfig({ + workflowPath: "/repo/WORKFLOW.md", + promptTemplate: "Prompt", + config: {}, + }); + + expect(resolved.escalationState).toBeNull(); + }); + it("blocks dispatch when required tracker settings are missing", () => { const resolved = resolveWorkflowConfig( { @@ -289,3 +314,132 @@ describe("config-resolver", () => { expect(validation).toEqual({ ok: true }); }); }); + +describe("config-resolver fast_track", () => { + it("parses fast_track label and initial_stage from stages config", () => { + const resolved = resolveWorkflowConfig({ + workflowPath: 
"/repo/WORKFLOW.md", + promptTemplate: "Prompt", + config: { + stages: { + initial_stage: "investigate", + fast_track: { + label: "trivial", + initial_stage: "implement", + }, + investigate: { type: "agent", on_complete: "implement" }, + implement: { type: "agent", on_complete: "done" }, + done: { type: "terminal" }, + }, + }, + }); + + expect(resolved.stages).not.toBeNull(); + expect(resolved.stages?.fastTrack).toEqual({ + label: "trivial", + initialStage: "implement", + }); + }); + + it("sets fastTrack to null when fast_track is not present in stages config", () => { + const resolved = resolveWorkflowConfig({ + workflowPath: "/repo/WORKFLOW.md", + promptTemplate: "Prompt", + config: { + stages: { + initial_stage: "investigate", + investigate: { type: "agent", on_complete: "done" }, + done: { type: "terminal" }, + }, + }, + }); + + expect(resolved.stages?.fastTrack).toBeNull(); + }); + + it("resolves slack_notify_channel from YAML config", () => { + const resolved = resolveWorkflowConfig({ + workflowPath: "/repo/WORKFLOW.md", + config: { + server: { slack_notify_channel: "C12345" }, + }, + promptTemplate: "Prompt", + }); + + expect(resolved.server.slackNotifyChannel).toBe("C12345"); + }); + + it("resolves slack_notify_channel from SLACK_NOTIFY_CHANNEL env var fallback", () => { + const resolved = resolveWorkflowConfig( + { + workflowPath: "/repo/WORKFLOW.md", + config: {}, + promptTemplate: "Prompt", + }, + { SLACK_NOTIFY_CHANNEL: "C99999" }, + ); + + expect(resolved.server.slackNotifyChannel).toBe("C99999"); + }); + + it("YAML slack_notify_channel takes precedence over env var", () => { + const resolved = resolveWorkflowConfig( + { + workflowPath: "/repo/WORKFLOW.md", + config: { + server: { slack_notify_channel: "C_YAML" }, + }, + promptTemplate: "Prompt", + }, + { SLACK_NOTIFY_CHANNEL: "C_ENV" }, + ); + + expect(resolved.server.slackNotifyChannel).toBe("C_YAML"); + }); + + it("returns null for slack_notify_channel when neither YAML nor env var is set", () => { 
+ const resolved = resolveWorkflowConfig( + { + workflowPath: "/repo/WORKFLOW.md", + config: {}, + promptTemplate: "Prompt", + }, + {}, + ); + + expect(resolved.server.slackNotifyChannel).toBeNull(); + }); + + it("ignores non-string slack_notify_channel values", () => { + const resolved = resolveWorkflowConfig({ + workflowPath: "/repo/WORKFLOW.md", + config: { + server: { slack_notify_channel: 12345 }, + }, + promptTemplate: "Prompt", + }); + + expect(resolved.server.slackNotifyChannel).toBeNull(); + }); + + it("fast_track validation rejects unknown fast_track initial_stage target", () => { + const stagesConfig = resolveStagesConfig({ + initial_stage: "investigate", + fast_track: { + label: "trivial", + initial_stage: "nonexistent", + }, + investigate: { type: "agent", on_complete: "done" }, + done: { type: "terminal" }, + }); + + const result = validateStagesConfig(stagesConfig); + + expect(result.ok).toBe(false); + expect(result.errors).toEqual( + expect.arrayContaining([ + expect.stringContaining("fast_track.initial_stage 'nonexistent'"), + ]), + ); + }); +}); diff --git a/tests/config/stages.test.ts b/tests/config/stages.test.ts new file mode 100644 index 00000000..138c2724 --- /dev/null +++ b/tests/config/stages.test.ts @@ -0,0 +1,653 @@ +import { describe, expect, it } from "vitest"; + +import { + resolveStagesConfig, + validateStagesConfig, +} from "../../src/config/config-resolver.js"; +import type { StagesConfig } from "../../src/config/types.js"; + +describe("resolveStagesConfig", () => { + it("returns null when stages is undefined or not an object", () => { + expect(resolveStagesConfig(undefined)).toBeNull(); + expect(resolveStagesConfig(null)).toBeNull(); + expect(resolveStagesConfig("not-an-object")).toBeNull(); + expect(resolveStagesConfig([])).toBeNull(); + }); + + it("returns null when no stage entries have a valid type", () => { + expect( + resolveStagesConfig({ + investigate: { type: "invalid" }, + implement: {}, + }), + ).toBeNull(); + }); + + 
it("parses a minimal two-stage workflow", () => { + const result = resolveStagesConfig({ + implement: { + type: "agent", + runner: "claude-code", + model: "claude-sonnet-4-5", + max_turns: 30, + prompt: "implement.liquid", + on_complete: "done", + }, + done: { + type: "terminal", + }, + }); + + expect(result).not.toBeNull(); + expect(result!.initialStage).toBe("implement"); + expect(Object.keys(result!.stages)).toEqual(["implement", "done"]); + + const implement = result!.stages.implement!; + expect(implement.type).toBe("agent"); + expect(implement.runner).toBe("claude-code"); + expect(implement.model).toBe("claude-sonnet-4-5"); + expect(implement.maxTurns).toBe(30); + expect(implement.prompt).toBe("implement.liquid"); + expect(implement.transitions.onComplete).toBe("done"); + expect(implement.transitions.onApprove).toBeNull(); + expect(implement.transitions.onRework).toBeNull(); + + const done = result!.stages.done!; + expect(done.type).toBe("terminal"); + }); + + it("respects explicit initial_stage", () => { + const result = resolveStagesConfig({ + initial_stage: "investigate", + investigate: { + type: "agent", + on_complete: "implement", + }, + implement: { + type: "agent", + on_complete: "done", + }, + done: { + type: "terminal", + }, + }); + + expect(result!.initialStage).toBe("investigate"); + }); + + it("uses first stage as initial_stage when not specified", () => { + const result = resolveStagesConfig({ + investigate: { + type: "agent", + on_complete: "done", + }, + done: { + type: "terminal", + }, + }); + + expect(result!.initialStage).toBe("investigate"); + }); + + it("parses gate stages with gate_type, on_approve, on_rework, and max_rework", () => { + const result = resolveStagesConfig({ + review: { + type: "gate", + gate_type: "ensemble", + on_approve: "merge", + on_rework: "implement", + max_rework: 3, + }, + implement: { + type: "agent", + on_complete: "review", + }, + merge: { + type: "agent", + on_complete: "done", + }, + done: { + type: "terminal", 
+ }, + }); + + const review = result!.stages.review!; + expect(review.type).toBe("gate"); + expect(review.gateType).toBe("ensemble"); + expect(review.maxRework).toBe(3); + expect(review.transitions.onApprove).toBe("merge"); + expect(review.transitions.onRework).toBe("implement"); + }); + + it("parses stage-level concurrency and timeout overrides", () => { + const result = resolveStagesConfig({ + investigate: { + type: "agent", + concurrency: 2, + timeout_ms: 60000, + on_complete: "done", + }, + done: { + type: "terminal", + }, + }); + + expect(result!.stages.investigate!.concurrency).toBe(2); + expect(result!.stages.investigate!.timeoutMs).toBe(60000); + }); + + it("parses linear_state from stage definition", () => { + const result = resolveStagesConfig({ + investigate: { + type: "agent", + linear_state: "In Progress", + on_complete: "done", + }, + done: { + type: "terminal", + }, + }); + + expect(result!.stages.investigate!.linearState).toBe("In Progress"); + }); + + it("defaults linearState to null when not specified", () => { + const result = resolveStagesConfig({ + implement: { + type: "agent", + on_complete: "done", + }, + done: { + type: "terminal", + }, + }); + + expect(result!.stages.implement!.linearState).toBeNull(); + expect(result!.stages.done!.linearState).toBeNull(); + }); + + it("treats unrecognized gate_type as null", () => { + const result = resolveStagesConfig({ + review: { + type: "gate", + gate_type: "unknown", + on_approve: "done", + }, + done: { + type: "terminal", + }, + }); + + expect(result!.stages.review!.gateType).toBeNull(); + }); +}); + +describe("validateStagesConfig", () => { + it("returns ok for null stages (no stages configured)", () => { + const result = validateStagesConfig(null); + expect(result.ok).toBe(true); + expect(result.errors).toEqual([]); + }); + + it("returns ok for a valid stage machine", () => { + const stages: StagesConfig = { + initialStage: "investigate", + fastTrack: null, + stages: { + investigate: { + type: 
"agent", + runner: null, + model: null, + prompt: null, + maxTurns: null, + timeoutMs: null, + concurrency: null, + gateType: null, + maxRework: null, + reviewers: [], + transitions: { + onComplete: "review", + onApprove: null, + onRework: null, + }, + linearState: null, + }, + review: { + type: "gate", + runner: null, + model: null, + prompt: null, + maxTurns: null, + timeoutMs: null, + concurrency: null, + gateType: "ensemble", + maxRework: 3, + reviewers: [], + transitions: { + onComplete: null, + onApprove: "done", + onRework: "investigate", + }, + linearState: null, + }, + done: { + type: "terminal", + runner: null, + model: null, + prompt: null, + maxTurns: null, + timeoutMs: null, + concurrency: null, + gateType: null, + maxRework: null, + reviewers: [], + transitions: { onComplete: null, onApprove: null, onRework: null }, + linearState: null, + }, + }, + }; + const result = validateStagesConfig(stages); + expect(result.ok).toBe(true); + expect(result.errors).toEqual([]); + }); + + it("rejects when initial_stage references unknown stage", () => { + const stages: StagesConfig = { + initialStage: "nonexistent", + fastTrack: null, + stages: { + implement: { + type: "agent", + runner: null, + model: null, + prompt: null, + maxTurns: null, + timeoutMs: null, + concurrency: null, + gateType: null, + maxRework: null, + reviewers: [], + transitions: { onComplete: "done", onApprove: null, onRework: null }, + linearState: null, + }, + done: { + type: "terminal", + runner: null, + model: null, + prompt: null, + maxTurns: null, + timeoutMs: null, + concurrency: null, + gateType: null, + maxRework: null, + reviewers: [], + transitions: { onComplete: null, onApprove: null, onRework: null }, + linearState: null, + }, + }, + }; + const result = validateStagesConfig(stages); + expect(result.ok).toBe(false); + expect(result.errors).toContainEqual( + expect.stringContaining("initial_stage 'nonexistent'"), + ); + }); + + it("rejects agent stage without on_complete transition", 
() => { + const stages: StagesConfig = { + initialStage: "implement", + fastTrack: null, + stages: { + implement: { + type: "agent", + runner: null, + model: null, + prompt: null, + maxTurns: null, + timeoutMs: null, + concurrency: null, + gateType: null, + maxRework: null, + reviewers: [], + transitions: { onComplete: null, onApprove: null, onRework: null }, + linearState: null, + }, + done: { + type: "terminal", + runner: null, + model: null, + prompt: null, + maxTurns: null, + timeoutMs: null, + concurrency: null, + gateType: null, + maxRework: null, + reviewers: [], + transitions: { onComplete: null, onApprove: null, onRework: null }, + linearState: null, + }, + }, + }; + const result = validateStagesConfig(stages); + expect(result.ok).toBe(false); + expect(result.errors).toContainEqual( + expect.stringContaining("'implement' (agent) has no on_complete"), + ); + }); + + it("rejects gate stage without on_approve transition", () => { + const stages: StagesConfig = { + initialStage: "review", + fastTrack: null, + stages: { + review: { + type: "gate", + runner: null, + model: null, + prompt: null, + maxTurns: null, + timeoutMs: null, + concurrency: null, + gateType: "ensemble", + maxRework: null, + reviewers: [], + transitions: { onComplete: null, onApprove: null, onRework: null }, + linearState: null, + }, + done: { + type: "terminal", + runner: null, + model: null, + prompt: null, + maxTurns: null, + timeoutMs: null, + concurrency: null, + gateType: null, + maxRework: null, + reviewers: [], + transitions: { onComplete: null, onApprove: null, onRework: null }, + linearState: null, + }, + }, + }; + const result = validateStagesConfig(stages); + expect(result.ok).toBe(false); + expect(result.errors).toContainEqual( + expect.stringContaining("'review' (gate) has no on_approve"), + ); + }); + + it("rejects transitions referencing unknown stages", () => { + const stages: StagesConfig = { + initialStage: "implement", + fastTrack: null, + stages: { + implement: { + type: 
"agent", + runner: null, + model: null, + prompt: null, + maxTurns: null, + timeoutMs: null, + concurrency: null, + gateType: null, + maxRework: null, + reviewers: [], + transitions: { + onComplete: "nonexistent", + onApprove: null, + onRework: null, + }, + linearState: null, + }, + done: { + type: "terminal", + runner: null, + model: null, + prompt: null, + maxTurns: null, + timeoutMs: null, + concurrency: null, + gateType: null, + maxRework: null, + reviewers: [], + transitions: { onComplete: null, onApprove: null, onRework: null }, + linearState: null, + }, + }, + }; + const result = validateStagesConfig(stages); + expect(result.ok).toBe(false); + expect(result.errors).toContainEqual( + expect.stringContaining( + "on_complete references unknown stage 'nonexistent'", + ), + ); + }); + + it("rejects when no terminal stage is defined", () => { + const stages: StagesConfig = { + initialStage: "a", + fastTrack: null, + stages: { + a: { + type: "agent", + runner: null, + model: null, + prompt: null, + maxTurns: null, + timeoutMs: null, + concurrency: null, + gateType: null, + maxRework: null, + reviewers: [], + transitions: { onComplete: "b", onApprove: null, onRework: null }, + linearState: null, + }, + b: { + type: "agent", + runner: null, + model: null, + prompt: null, + maxTurns: null, + timeoutMs: null, + concurrency: null, + gateType: null, + maxRework: null, + reviewers: [], + transitions: { onComplete: "a", onApprove: null, onRework: null }, + linearState: null, + }, + }, + }; + const result = validateStagesConfig(stages); + expect(result.ok).toBe(false); + expect(result.errors).toContainEqual( + expect.stringContaining("No terminal stage defined"), + ); + }); + + it("detects unreachable stages", () => { + const stages: StagesConfig = { + initialStage: "implement", + fastTrack: null, + stages: { + implement: { + type: "agent", + runner: null, + model: null, + prompt: null, + maxTurns: null, + timeoutMs: null, + concurrency: null, + gateType: null, + maxRework: 
null, + reviewers: [], + transitions: { onComplete: "done", onApprove: null, onRework: null }, + linearState: null, + }, + orphan: { + type: "agent", + runner: null, + model: null, + prompt: null, + maxTurns: null, + timeoutMs: null, + concurrency: null, + gateType: null, + maxRework: null, + reviewers: [], + transitions: { onComplete: "done", onApprove: null, onRework: null }, + linearState: null, + }, + done: { + type: "terminal", + runner: null, + model: null, + prompt: null, + maxTurns: null, + timeoutMs: null, + concurrency: null, + gateType: null, + maxRework: null, + reviewers: [], + transitions: { onComplete: null, onApprove: null, onRework: null }, + linearState: null, + }, + }, + }; + const result = validateStagesConfig(stages); + expect(result.ok).toBe(false); + expect(result.errors).toContainEqual( + expect.stringContaining("'orphan' is unreachable"), + ); + }); + + it("validates agent stage on_rework referencing valid stage", () => { + const stages: StagesConfig = { + initialStage: "implement", + fastTrack: null, + stages: { + implement: { + type: "agent", + runner: null, + model: null, + prompt: null, + maxTurns: null, + timeoutMs: null, + concurrency: null, + gateType: null, + maxRework: null, + reviewers: [], + transitions: { + onComplete: "review", + onApprove: null, + onRework: null, + }, + linearState: null, + }, + review: { + type: "agent", + runner: null, + model: null, + prompt: null, + maxTurns: null, + timeoutMs: null, + concurrency: null, + gateType: null, + maxRework: 3, + reviewers: [], + transitions: { + onComplete: "done", + onApprove: null, + onRework: "implement", + }, + linearState: null, + }, + done: { + type: "terminal", + runner: null, + model: null, + prompt: null, + maxTurns: null, + timeoutMs: null, + concurrency: null, + gateType: null, + maxRework: null, + reviewers: [], + transitions: { onComplete: null, onApprove: null, onRework: null }, + linearState: null, + }, + }, + }; + const result = validateStagesConfig(stages); + 
expect(result.ok).toBe(true); + expect(result.errors).toEqual([]); + }); + + it("rejects agent stage on_rework referencing unknown stage", () => { + const stages: StagesConfig = { + initialStage: "implement", + fastTrack: null, + stages: { + implement: { + type: "agent", + runner: null, + model: null, + prompt: null, + maxTurns: null, + timeoutMs: null, + concurrency: null, + gateType: null, + maxRework: null, + reviewers: [], + transitions: { + onComplete: "review", + onApprove: null, + onRework: null, + }, + linearState: null, + }, + review: { + type: "agent", + runner: null, + model: null, + prompt: null, + maxTurns: null, + timeoutMs: null, + concurrency: null, + gateType: null, + maxRework: 3, + reviewers: [], + transitions: { + onComplete: "done", + onApprove: null, + onRework: "nonexistent", + }, + linearState: null, + }, + done: { + type: "terminal", + runner: null, + model: null, + prompt: null, + maxTurns: null, + timeoutMs: null, + concurrency: null, + gateType: null, + maxRework: null, + reviewers: [], + transitions: { onComplete: null, onApprove: null, onRework: null }, + linearState: null, + }, + }, + }; + const result = validateStagesConfig(stages); + expect(result.ok).toBe(false); + expect(result.errors).toContainEqual( + expect.stringContaining( + "'review' on_rework references unknown stage 'nonexistent'", + ), + ); + }); +}); diff --git a/tests/domain/model.test.ts b/tests/domain/model.test.ts index f398edb0..20cc925e 100644 --- a/tests/domain/model.test.ts +++ b/tests/domain/model.test.ts @@ -1,12 +1,16 @@ import { describe, expect, it } from "vitest"; import { + type ExecutionHistory, + FAILURE_CLASSES, ORCHESTRATOR_EVENTS, ORCHESTRATOR_ISSUE_STATUSES, RUN_ATTEMPT_PHASES, + type StageRecord, createEmptyLiveSession, createInitialOrchestratorState, normalizeIssueState, + parseFailureSignal, toSessionId, toWorkspaceKey, } from "../../src/domain/model.js"; @@ -38,12 +42,15 @@ describe("domain model", () => { ]); 
expect(ORCHESTRATOR_EVENTS).toEqual([ "poll_tick", + "poll_tick_completed", "worker_exit_normal", "worker_exit_abnormal", + "stage_completed", "codex_update_event", "retry_timer_fired", "reconciliation_state_refresh", "stall_timeout", + "shutdown_complete", ]); }); @@ -65,10 +72,23 @@ describe("domain model", () => { codexInputTokens: 0, codexOutputTokens: 0, codexTotalTokens: 0, + codexCacheReadTokens: 0, + codexCacheWriteTokens: 0, + codexNoCacheTokens: 0, + codexReasoningTokens: 0, + codexTotalInputTokens: 0, + codexTotalOutputTokens: 0, lastReportedInputTokens: 0, lastReportedOutputTokens: 0, lastReportedTotalTokens: 0, turnCount: 0, + totalStageInputTokens: 0, + totalStageOutputTokens: 0, + totalStageTotalTokens: 0, + totalStageCacheReadTokens: 0, + totalStageCacheWriteTokens: 0, + turnHistory: [], + recentActivity: [], }); const state = createInitialOrchestratorState({ @@ -82,12 +102,171 @@ describe("domain model", () => { expect([...state.claimed]).toEqual([]); expect(state.retryAttempts).toEqual({}); expect([...state.completed]).toEqual([]); + expect([...state.failed]).toEqual([]); expect(state.codexTotals).toEqual({ inputTokens: 0, outputTokens: 0, totalTokens: 0, + cacheReadTokens: 0, + cacheWriteTokens: 0, + noCacheTokens: 0, + reasoningTokens: 0, secondsRunning: 0, }); expect(state.codexRateLimits).toBeNull(); + expect(state.issueExecutionHistory).toEqual({}); + }); +}); + +describe("ExecutionHistory", () => { + it("stage record captures all fields", () => { + const record: StageRecord = { + stageName: "implement", + durationMs: 12000, + totalTokens: 5000, + turns: 10, + outcome: "success", + }; + expect(record.stageName).toBe("implement"); + expect(record.durationMs).toBe(12000); + expect(record.totalTokens).toBe(5000); + expect(record.turns).toBe(10); + expect(record.outcome).toBe("success"); + }); + + it("stage record appended on worker exit", () => { + const state = createInitialOrchestratorState({ + pollIntervalMs: 1000, + maxConcurrentAgents: 2, + 
}); + const record: StageRecord = { + stageName: "investigate", + durationMs: 5000, + totalTokens: 1000, + turns: 3, + outcome: "success", + }; + // Simulate appending a StageRecord on worker exit + state.issueExecutionHistory["issue-1"] = []; + state.issueExecutionHistory["issue-1"].push(record); + expect(state.issueExecutionHistory["issue-1"]).toHaveLength(1); + expect(state.issueExecutionHistory["issue-1"][0]).toEqual(record); + + // Simulate a second stage completing + const record2: StageRecord = { + stageName: "implement", + durationMs: 8000, + totalTokens: 2500, + turns: 5, + outcome: "success", + }; + state.issueExecutionHistory["issue-1"].push(record2); + expect(state.issueExecutionHistory["issue-1"]).toHaveLength(2); + }); + + it("execution history cleaned up after completion", () => { + const state = createInitialOrchestratorState({ + pollIntervalMs: 1000, + maxConcurrentAgents: 2, + }); + const history: ExecutionHistory = [ + { + stageName: "investigate", + durationMs: 1000, + totalTokens: 100, + turns: 1, + outcome: "success", + }, + { + stageName: "implement", + durationMs: 2000, + totalTokens: 200, + turns: 2, + outcome: "success", + }, + { + stageName: "review", + durationMs: 3000, + totalTokens: 300, + turns: 3, + outcome: "success", + }, + { + stageName: "ship", + durationMs: 4000, + totalTokens: 400, + turns: 4, + outcome: "success", + }, + ]; + state.issueExecutionHistory["issue-1"] = history; + expect(state.issueExecutionHistory["issue-1"]).toHaveLength(4); + + // Simulate cleanup when issue reaches Done terminal state + // biome-ignore lint/performance/noDelete: delete required here - Record type doesn't accept undefined + delete state.issueExecutionHistory["issue-1"]; + expect(state.issueExecutionHistory["issue-1"]).toBeUndefined(); + }); +}); + +describe("parseFailureSignal", () => { + it("defines the expected failure classes", () => { + expect(FAILURE_CLASSES).toEqual([ + "verify", + "review", + "rebase", + "spec", + "infra", + ]); + }); + 
+ it("parses each failure class from agent output", () => { + expect(parseFailureSignal("[STAGE_FAILED: verify]")).toEqual({ + failureClass: "verify", + }); + expect(parseFailureSignal("[STAGE_FAILED: review]")).toEqual({ + failureClass: "review", + }); + expect(parseFailureSignal("[STAGE_FAILED: rebase]")).toEqual({ + failureClass: "rebase", + }); + expect(parseFailureSignal("[STAGE_FAILED: spec]")).toEqual({ + failureClass: "spec", + }); + expect(parseFailureSignal("[STAGE_FAILED: infra]")).toEqual({ + failureClass: "infra", + }); + }); + + it("returns null for null, undefined, or empty input", () => { + expect(parseFailureSignal(null)).toBeNull(); + expect(parseFailureSignal(undefined)).toBeNull(); + expect(parseFailureSignal("")).toBeNull(); + }); + + it("returns null when no failure signal is present", () => { + expect(parseFailureSignal("[STAGE_COMPLETE]")).toBeNull(); + expect(parseFailureSignal("All tests passed successfully.")).toBeNull(); + expect(parseFailureSignal("STAGE_FAILED: verify")).toBeNull(); + }); + + it("extracts signal from longer agent output", () => { + const output = + "Tests failed.\n[STAGE_FAILED: verify]\nSee logs for details."; + expect(parseFailureSignal(output)).toEqual({ failureClass: "verify" }); + }); + + it("handles extra whitespace inside brackets", () => { + expect(parseFailureSignal("[STAGE_FAILED: spec ]")).toEqual({ + failureClass: "spec", + }); + expect(parseFailureSignal("[STAGE_FAILED:review]")).toEqual({ + failureClass: "review", + }); + }); + + it("rejects unknown failure classes", () => { + expect(parseFailureSignal("[STAGE_FAILED: unknown]")).toBeNull(); + expect(parseFailureSignal("[STAGE_FAILED: timeout]")).toBeNull(); }); }); diff --git a/tests/error-handling.test.ts b/tests/error-handling.test.ts new file mode 100644 index 00000000..9c454194 --- /dev/null +++ b/tests/error-handling.test.ts @@ -0,0 +1,226 @@ +import { afterEach, beforeEach, describe, expect, it, vi } from "vitest"; + +// Mock the AI SDK modules 
before importing handler +vi.mock("ai", () => ({ + streamText: vi.fn(), +})); + +vi.mock("ai-sdk-provider-claude-code", () => ({ + claudeCode: vi.fn(), +})); + +vi.mock("../src/slack-bot/stream-consumer.js", () => ({ + StreamConsumer: vi.fn().mockImplementation(() => ({ + append: vi.fn().mockResolvedValue(undefined), + finish: vi.fn().mockResolvedValue(undefined), + })), +})); + +import { streamText } from "ai"; +import { claudeCode } from "ai-sdk-provider-claude-code"; + +import type { BoltMessageArgs } from "../src/slack-bot/handler.js"; +import { createMessageHandler } from "../src/slack-bot/handler.js"; +import { createCcSessionStore } from "../src/slack-bot/session-store.js"; +import { StreamConsumer } from "../src/slack-bot/stream-consumer.js"; +import type { ChannelProjectMap, SessionMap } from "../src/slack-bot/types.js"; + +/** Create a mock Bolt message args object. */ +function createMockBoltArgs( + channelId: string, + text: string, +): { + args: BoltMessageArgs; + say: ReturnType<typeof vi.fn>; + client: { + reactions: { + add: ReturnType<typeof vi.fn>; + remove: ReturnType<typeof vi.fn>; + }; + }; +} { + const say = vi.fn().mockResolvedValue(undefined); + const client = { + reactions: { + add: vi.fn().mockResolvedValue(undefined), + remove: vi.fn().mockResolvedValue(undefined), + }, + assistant: { + threads: { + setStatus: vi.fn().mockResolvedValue(undefined), + }, + }, + }; + + const message = { + type: "message" as const, + text, + ts: "1234.5678", + channel: channelId, + user: "U_TEST_USER", + }; + + const args = { + message, + say, + client, + context: { teamId: "T_TEST_TEAM" }, + logger: { debug: vi.fn(), info: vi.fn(), warn: vi.fn(), error: vi.fn() }, + next: vi.fn(), + event: message, + payload: message, + body: { event: message }, + } as unknown as BoltMessageArgs; + + return { args, say, client }; +} + +describe("Error handling", () => { + beforeEach(() => { + vi.mocked(StreamConsumer).mockImplementation( + () => + ({ + append: 
vi.fn().mockResolvedValue(undefined), + finish: vi.fn().mockResolvedValue(undefined), + }) as unknown as StreamConsumer, + ); + }); + + afterEach(() => { + vi.restoreAllMocks(); + }); + + it("posts a user-friendly error message to the thread when streamText throws", async () => { + const channelMap: ChannelProjectMap = new Map([ + ["C123", "/tmp/test-project"], + ]); + const sessions: SessionMap = new Map(); + const ccSessions = createCcSessionStore(); + const mockModel = { id: "mock-claude-code-model" }; + + vi.mocked(claudeCode).mockReturnValue( + mockModel as unknown as ReturnType<typeof claudeCode>, + ); + + // Create a failing async iterable (plain object to avoid lint/useYield) + const failingStream: AsyncIterable<string> = { + [Symbol.asyncIterator]() { + return { + async next(): Promise<IteratorResult<string>> { + throw new Error("Rate limit exceeded"); + }, + }; + }, + }; + + vi.mocked(streamText).mockReturnValue({ + textStream: failingStream, + response: Promise.resolve({ messages: [] }), + } as unknown as ReturnType<typeof streamText>); + + const handler = createMessageHandler({ channelMap, sessions, ccSessions }); + const { args, say } = createMockBoltArgs("C123", "test query"); + await handler(args); + + // Should post a structured error message + expect(say).toHaveBeenCalledWith( + expect.objectContaining({ + text: expect.stringContaining("Rate limit exceeded"), + }), + ); + }); + + it("adds an x reaction instead of checkmark on error", async () => { + const channelMap: ChannelProjectMap = new Map([ + ["C123", "/tmp/test-project"], + ]); + const sessions: SessionMap = new Map(); + const ccSessions = createCcSessionStore(); + const mockModel = { id: "mock-claude-code-model" }; + + vi.mocked(claudeCode).mockReturnValue( + mockModel as unknown as ReturnType<typeof claudeCode>, + ); + + const failingStream: AsyncIterable<string> = { + [Symbol.asyncIterator]() { + return { + async next(): Promise<IteratorResult<string>> { + throw new Error("Session 
failure"); + }, + }; + }, + }; + + vi.mocked(streamText).mockReturnValue({ + textStream: failingStream, + response: Promise.resolve({ messages: [] }), + } as unknown as ReturnType<typeof streamText>); + + const handler = createMessageHandler({ channelMap, sessions, ccSessions }); + const { args, client } = createMockBoltArgs("C123", "test"); + await handler(args); + + // Verify reactions.remove('eyes') was called + expect(client.reactions.remove).toHaveBeenCalledWith( + expect.objectContaining({ name: "eyes" }), + ); + + // Verify reactions.add('x') was called + expect(client.reactions.add).toHaveBeenCalledWith( + expect.objectContaining({ name: "x" }), + ); + + // Verify white_check_mark was NOT added + const addCalls = client.reactions.add.mock.calls; + const checkmarkCalls = addCalls.filter( + (call: unknown[]) => + (call[0] as Record<string, unknown>)?.name === "white_check_mark", + ); + expect(checkmarkCalls).toHaveLength(0); + }); + + it("handles non-Error thrown values with a generic message", async () => { + const channelMap: ChannelProjectMap = new Map([ + ["C123", "/tmp/test-project"], + ]); + const sessions: SessionMap = new Map(); + const ccSessions = createCcSessionStore(); + const mockModel = { id: "mock-claude-code-model" }; + + vi.mocked(claudeCode).mockReturnValue( + mockModel as unknown as ReturnType<typeof claudeCode>, + ); + + const failingStream: AsyncIterable<string> = { + [Symbol.asyncIterator]() { + return { + async next(): Promise<IteratorResult<string>> { + throw "string error"; // eslint-disable-line no-throw-literal + }, + }; + }, + }; + + vi.mocked(streamText).mockReturnValue({ + textStream: failingStream, + response: Promise.resolve({ messages: [] }), + } as unknown as ReturnType<typeof streamText>); + + const handler = createMessageHandler({ channelMap, sessions, ccSessions }); + const { args, say, client } = createMockBoltArgs("C123", "test"); + await handler(args); + + // Should post generic error message for non-Error values + 
expect(say).toHaveBeenCalledWith( + expect.objectContaining({ + text: expect.stringContaining("An unexpected error occurred"), + }), + ); + + // Should still add x reaction + expect(client.reactions.add).toHaveBeenCalledWith( + expect.objectContaining({ name: "x" }), + ); + }); +}); diff --git a/tests/fixtures/codex-fake-server.mjs b/tests/fixtures/codex-fake-server.mjs index dbe72624..bf89d46f 100644 --- a/tests/fixtures/codex-fake-server.mjs +++ b/tests/fixtures/codex-fake-server.mjs @@ -40,8 +40,8 @@ async function handleMessage(message) { "initialize must include clientInfo.name", ); assertEqual( - message.params.clientInfo?.version, - "0.1.0", + typeof message.params.clientInfo?.version, + "string", "initialize must include clientInfo.version", ); assertEqual( @@ -204,7 +204,7 @@ async function handleMessage(message) { usage: { input_tokens: 20, output_tokens: 10, - total_tokens: 30, + cache_read_input_tokens: 5, }, }, rate_limits: { @@ -291,6 +291,8 @@ async function handleMessage(message) { inputTokens: 14, outputTokens: 9, totalTokens: 23, + cacheReadTokens: 4, + reasoningTokens: 2, }, rateLimits: { requestsRemaining: 10, diff --git a/tests/logging/fields.test.ts b/tests/logging/fields.test.ts index 7482aa2d..edb6b227 100644 --- a/tests/logging/fields.test.ts +++ b/tests/logging/fields.test.ts @@ -21,4 +21,10 @@ describe("LOG_FIELDS", () => { expect(LOG_FIELDS).toContain("rate_limit_requests_remaining"); expect(LOG_FIELDS).toContain("rate_limit_tokens_remaining"); }); + + it("includes per-turn observability fields", () => { + expect(LOG_FIELDS).toContain("turn_number"); + expect(LOG_FIELDS).toContain("prompt_chars"); + expect(LOG_FIELDS).toContain("estimated_prompt_tokens"); + }); }); diff --git a/tests/logging/recent-activity.test.ts b/tests/logging/recent-activity.test.ts new file mode 100644 index 00000000..6659a2eb --- /dev/null +++ b/tests/logging/recent-activity.test.ts @@ -0,0 +1,287 @@ +import { describe, expect, it } from "vitest"; + +import type { 
CodexClientEvent } from "../../src/codex/app-server-client.js"; +import { createEmptyLiveSession } from "../../src/domain/model.js"; +import { + applyCodexEventToSession, + buildActivityContext, + extractToolInputFromRaw, + extractToolNameFromRaw, +} from "../../src/logging/session-metrics.js"; + +function createEvent( + event: CodexClientEvent["event"], + overrides?: Partial<CodexClientEvent>, +): CodexClientEvent { + return { + event, + timestamp: "2026-03-21T10:00:01.000Z", + codexAppServerPid: "42", + ...overrides, + }; +} + +describe("extractToolNameFromRaw", () => { + it("extracts tool name from params.toolName", () => { + expect(extractToolNameFromRaw({ params: { toolName: "Read" } })).toBe( + "Read", + ); + }); + + it("extracts tool name from params.name", () => { + expect(extractToolNameFromRaw({ params: { name: "Edit" } })).toBe("Edit"); + }); + + it("extracts tool name from params.tool.name", () => { + expect( + extractToolNameFromRaw({ params: { tool: { name: "Write" } } }), + ).toBe("Write"); + }); + + it("extracts tool name from top-level name", () => { + expect(extractToolNameFromRaw({ name: "Bash" })).toBe("Bash"); + }); + + it("returns null when no tool name is found", () => { + expect(extractToolNameFromRaw({ params: {} })).toBeNull(); + }); +}); + +describe("extractToolInputFromRaw", () => { + it("extracts from params.input", () => { + const result = extractToolInputFromRaw({ + params: { input: { file_path: "/src/foo.ts" } }, + }); + expect(result).toEqual({ file_path: "/src/foo.ts" }); + }); + + it("extracts from params.arguments", () => { + const result = extractToolInputFromRaw({ + params: { arguments: { command: "ls -la" } }, + }); + expect(result).toEqual({ command: "ls -la" }); + }); + + it("returns undefined when params is missing", () => { + expect(extractToolInputFromRaw({})).toBeUndefined(); + }); +}); + +describe("buildActivityContext", () => { + it("extracts basename for Read tool", () => { + expect( + buildActivityContext("Read", { 
file_path: "/home/user/src/model.ts" }), + ).toBe("model.ts"); + }); + + it("extracts basename for Edit tool", () => { + expect( + buildActivityContext("Edit", { file_path: "/repo/src/index.ts" }), + ).toBe("index.ts"); + }); + + it("extracts basename for Write tool", () => { + expect( + buildActivityContext("Write", { file_path: "/tmp/output.json" }), + ).toBe("output.json"); + }); + + it("extracts pattern for Glob tool", () => { + expect(buildActivityContext("Glob", { pattern: "**/*.ts" })).toBe( + "**/*.ts", + ); + }); + + it("extracts pattern for Grep tool", () => { + expect(buildActivityContext("Grep", { pattern: "extractToolName" })).toBe( + "extractToolName", + ); + }); + + it("truncates long Bash commands to ~60 chars", () => { + const longCommand = + "find /home/user -name '*.ts' -exec grep -l 'import' {} \\; | sort | uniq | head -100"; + const result = buildActivityContext("Bash", { command: longCommand }); + expect(result).not.toBeNull(); + expect(result!.length).toBeLessThanOrEqual(61); // 60 + ellipsis char + expect(result!).toContain("…"); + }); + + it("keeps short Bash commands as-is", () => { + expect(buildActivityContext("Bash", { command: "npm test" })).toBe( + "npm test", + ); + }); + + it("extracts first string-valued argument for unknown tools", () => { + expect(buildActivityContext("UnknownTool", { some: "data" })).toBe("data"); + }); + + it("truncates long string arguments for unknown tools to 60 chars", () => { + const longValue = "A".repeat(80); + const result = buildActivityContext("TodoWrite", { content: longValue }); + expect(result).toBe(`${"A".repeat(60)}…`); + }); + + it("returns null for unknown tools with no string-valued arguments", () => { + expect( + buildActivityContext("UnknownTool", { count: 42, flag: true }), + ).toBeNull(); + }); + + it("returns null when input is not an object", () => { + expect(buildActivityContext("Read", null)).toBeNull(); + expect(buildActivityContext("Read", undefined)).toBeNull(); + 
expect(buildActivityContext("Read", "string")).toBeNull(); + }); +}); + +describe("recent activity ring buffer", () => { + it("populates recentActivity on approval_auto_approved events", () => { + const session = createEmptyLiveSession(); + + const event = createEvent("approval_auto_approved", { + raw: { + params: { + toolName: "Read", + input: { file_path: "/repo/src/model.ts" }, + }, + }, + }); + + applyCodexEventToSession(session, event); + + expect(session.recentActivity).toHaveLength(1); + expect(session.recentActivity[0]).toEqual({ + timestamp: "2026-03-21T10:00:01.000Z", + toolName: "Read", + context: "model.ts", + }); + }); + + it("populates recentActivity on notification events", () => { + const session = createEmptyLiveSession(); + + const event = createEvent("notification", { + message: "Downloading packages", + }); + + applyCodexEventToSession(session, event); + + expect(session.recentActivity).toHaveLength(1); + expect(session.recentActivity[0]).toEqual({ + timestamp: "2026-03-21T10:00:01.000Z", + toolName: "Notification", + context: "Downloading packages", + }); + }); + + it("trims ring buffer to max 10 entries", () => { + const session = createEmptyLiveSession(); + + for (let i = 0; i < 15; i++) { + const event = createEvent("approval_auto_approved", { + timestamp: `2026-03-21T10:00:${String(i).padStart(2, "0")}.000Z`, + raw: { + params: { + toolName: "Edit", + input: { file_path: `/repo/src/file-${i}.ts` }, + }, + }, + }); + applyCodexEventToSession(session, event); + } + + expect(session.recentActivity).toHaveLength(10); + // The first 5 should have been trimmed; the oldest remaining entry is file-5 + expect(session.recentActivity[0]!.context).toBe("file-5.ts"); + expect(session.recentActivity[9]!.context).toBe("file-14.ts"); + }); + + it("records Bash tool calls with truncated commands", () => { + const session = createEmptyLiveSession(); + + const event = createEvent("approval_auto_approved", { + raw: { + params: { + toolName: "Bash", + input: { 
command: "npm test" }, + }, + }, + }); + + applyCodexEventToSession(session, event); + + expect(session.recentActivity).toHaveLength(1); + expect(session.recentActivity[0]!.toolName).toBe("Bash"); + expect(session.recentActivity[0]!.context).toBe("npm test"); + }); + + it("records unknown tool calls with first string arg as context", () => { + const session = createEmptyLiveSession(); + + const event = createEvent("approval_auto_approved", { + raw: { + params: { + toolName: "CustomTool", + input: { data: "value" }, + }, + }, + }); + + applyCodexEventToSession(session, event); + + expect(session.recentActivity).toHaveLength(1); + expect(session.recentActivity[0]!.toolName).toBe("CustomTool"); + expect(session.recentActivity[0]!.context).toBe("value"); + }); + + it("records unknown tool calls with null context when no string args", () => { + const session = createEmptyLiveSession(); + + const event = createEvent("approval_auto_approved", { + raw: { + params: { + toolName: "CustomTool", + input: { count: 42, flag: true }, + }, + }, + }); + + applyCodexEventToSession(session, event); + + expect(session.recentActivity).toHaveLength(1); + expect(session.recentActivity[0]!.toolName).toBe("CustomTool"); + expect(session.recentActivity[0]!.context).toBeNull(); + }); + + it("skips when raw is null or missing", () => { + const session = createEmptyLiveSession(); + + applyCodexEventToSession( + session, + createEvent("approval_auto_approved", { raw: undefined }), + ); + applyCodexEventToSession( + session, + createEvent("approval_auto_approved", { + raw: null as unknown as undefined, + }), + ); + + expect(session.recentActivity).toHaveLength(0); + }); + + it("skips when tool name cannot be extracted", () => { + const session = createEmptyLiveSession(); + + applyCodexEventToSession( + session, + createEvent("approval_auto_approved", { + raw: { params: { somethingElse: true } }, + }), + ); + + expect(session.recentActivity).toHaveLength(0); + }); +}); diff --git 
a/tests/logging/runtime-snapshot.test.ts b/tests/logging/runtime-snapshot.test.ts index 72c3263f..b9f00cd0 100644 --- a/tests/logging/runtime-snapshot.test.ts +++ b/tests/logging/runtime-snapshot.test.ts @@ -5,9 +5,154 @@ import { createEmptyLiveSession, createInitialOrchestratorState, } from "../../src/domain/model.js"; -import { buildRuntimeSnapshot } from "../../src/logging/runtime-snapshot.js"; +import { formatEasternTimestamp } from "../../src/logging/format-timestamp.js"; +import { + STAGE_STALL_THRESHOLDS, + buildRuntimeSnapshot, + getStallThreshold, +} from "../../src/logging/runtime-snapshot.js"; describe("runtime snapshot", () => { + it("includes pipeline_stage and activity_summary in running rows", () => { + const state = createInitialOrchestratorState({ + pollIntervalMs: 30_000, + maxConcurrentAgents: 2, + }); + state.running["issue-1"] = createRunningEntry({ + issueId: "issue-1", + identifier: "ABC-1", + startedAt: "2026-03-06T10:00:00.000Z", + sessionId: "thread-a-turn-1", + lastCodexEvent: "turn_completed", + lastCodexTimestamp: "2026-03-06T10:00:05.000Z", + lastCodexMessage: "Editing src/foo.ts", + turnCount: 1, + codexInputTokens: 10, + codexOutputTokens: 5, + codexTotalTokens: 15, + }); + state.issueStages["issue-1"] = "implement"; + + const snapshot = buildRuntimeSnapshot(state, { + now: new Date("2026-03-06T10:00:10.000Z"), + }); + + expect(snapshot.running).toHaveLength(1); + expect(snapshot.running[0]!.pipeline_stage).toBe("implement"); + expect(snapshot.running[0]!.activity_summary).toBe("Editing src/foo.ts"); + }); + + it("includes rework_count in running row when greater than zero", () => { + const state = createInitialOrchestratorState({ + pollIntervalMs: 30_000, + maxConcurrentAgents: 2, + }); + state.running["issue-1"] = createRunningEntry({ + issueId: "issue-1", + identifier: "ABC-1", + startedAt: "2026-03-06T10:00:00.000Z", + sessionId: "thread-a-turn-1", + lastCodexEvent: "turn_completed", + lastCodexTimestamp: 
"2026-03-06T10:00:05.000Z", + lastCodexMessage: "Fixing review comments", + turnCount: 3, + codexInputTokens: 10, + codexOutputTokens: 5, + codexTotalTokens: 15, + }); + state.issueReworkCounts["issue-1"] = 2; + + const snapshot = buildRuntimeSnapshot(state, { + now: new Date("2026-03-06T10:00:10.000Z"), + }); + + expect(snapshot.running).toHaveLength(1); + expect(snapshot.running[0]!.rework_count).toBe(2); + }); + + it("omits rework_count from running row when zero", () => { + const state = createInitialOrchestratorState({ + pollIntervalMs: 30_000, + maxConcurrentAgents: 2, + }); + state.running["issue-1"] = createRunningEntry({ + issueId: "issue-1", + identifier: "ABC-1", + startedAt: "2026-03-06T10:00:00.000Z", + sessionId: "thread-a-turn-1", + lastCodexEvent: "turn_completed", + lastCodexTimestamp: "2026-03-06T10:00:05.000Z", + lastCodexMessage: "Working", + turnCount: 1, + codexInputTokens: 10, + codexOutputTokens: 5, + codexTotalTokens: 15, + }); + + const snapshot = buildRuntimeSnapshot(state, { + now: new Date("2026-03-06T10:00:10.000Z"), + }); + + expect(snapshot.running).toHaveLength(1); + expect(snapshot.running[0]!.rework_count).toBeUndefined(); + }); + + it("sets pipeline_stage to null when no stage is set for the issue", () => { + const state = createInitialOrchestratorState({ + pollIntervalMs: 30_000, + maxConcurrentAgents: 2, + }); + state.running["issue-1"] = createRunningEntry({ + issueId: "issue-1", + identifier: "ABC-1", + startedAt: "2026-03-06T10:00:00.000Z", + sessionId: "thread-a-turn-1", + lastCodexEvent: "turn_completed", + lastCodexTimestamp: "2026-03-06T10:00:05.000Z", + lastCodexMessage: "Working", + turnCount: 1, + codexInputTokens: 10, + codexOutputTokens: 5, + codexTotalTokens: 15, + }); + + const snapshot = buildRuntimeSnapshot(state, { + now: new Date("2026-03-06T10:00:10.000Z"), + }); + + expect(snapshot.running[0]!.pipeline_stage).toBeNull(); + }); + + it("includes stage_duration_seconds and tokens_per_turn in running rows", () 
=> { + const state = createInitialOrchestratorState({ + pollIntervalMs: 30_000, + maxConcurrentAgents: 2, + }); + const now = new Date("2026-03-21T10:05:00.000Z"); + const startedAt = new Date(now.getTime() - 300_000).toISOString(); // 300 seconds ago + const entry = createRunningEntry({ + issueId: "issue-1", + identifier: "ABC-1", + startedAt, + sessionId: "thread-a-turn-1", + lastCodexEvent: "turn_completed", + lastCodexTimestamp: "2026-03-21T10:04:59.000Z", + lastCodexMessage: "Finished", + turnCount: 10, + codexInputTokens: 50000, + codexOutputTokens: 70000, + codexTotalTokens: 120000, + }); + entry.totalStageTotalTokens = 120000; + state.running["issue-1"] = entry; + + const snapshot = buildRuntimeSnapshot(state, { now }); + + expect(snapshot.running).toHaveLength(1); + expect(snapshot.running[0]!.stage_duration_seconds).toBeCloseTo(300, 0); + expect(snapshot.running[0]!.tokens_per_turn).toBe(12000); + }); + it("builds a sorted state snapshot with live runtime totals", () => { const state = createInitialOrchestratorState({ pollIntervalMs: 30_000, @@ -21,7 +166,7 @@ describe("runtime snapshot", () => { requestsRemaining: 7, tokensRemaining: 700, }; - state.running["issue-2"] = createRunningEntry({ + const entry2 = createRunningEntry({ issueId: "issue-2", identifier: "ZZZ-2", startedAt: "2026-03-06T10:00:03.000Z", @@ -34,7 +179,11 @@ describe("runtime snapshot", () => { codexOutputTokens: 8, codexTotalTokens: 20, }); - state.running["issue-1"] = createRunningEntry({ + entry2.totalStageInputTokens = 12; + entry2.totalStageOutputTokens = 8; + entry2.totalStageTotalTokens = 20; + state.running["issue-2"] = entry2; + const entry1 = createRunningEntry({ issueId: "issue-1", identifier: "AAA-1", startedAt: "2026-03-06T10:00:00.000Z", @@ -47,6 +196,10 @@ describe("runtime snapshot", () => { codexOutputTokens: 20, codexTotalTokens: 50, }); + entry1.totalStageInputTokens = 30; + entry1.totalStageOutputTokens = 20; + entry1.totalStageTotalTokens = 50; + 
state.running["issue-1"] = entry1; state.retryAttempts["issue-3"] = { issueId: "issue-3", identifier: "MMM-3", @@ -54,16 +207,21 @@ describe("runtime snapshot", () => { dueAtMs: Date.parse("2026-03-06T10:00:20.000Z"), timerHandle: null, error: "no available orchestrator slots", + delayType: "failure", }; const snapshot = buildRuntimeSnapshot(state, { now: new Date("2026-03-06T10:00:10.000Z"), }); - expect(snapshot.generated_at).toBe("2026-03-06T10:00:10.000Z"); + expect(snapshot.generated_at).toBe( + formatEasternTimestamp(new Date("2026-03-06T10:00:10.000Z")), + ); expect(snapshot.counts).toEqual({ running: 2, retrying: 1, + completed: 0, + failed: 0, }); expect(snapshot.running.map((row) => row.issue_identifier)).toEqual([ "AAA-1", @@ -72,25 +230,27 @@ describe("runtime snapshot", () => { expect(snapshot.running[0]).toMatchObject({ issue_id: "issue-1", issue_identifier: "AAA-1", + issue_title: "AAA-1", state: "In Progress", session_id: "thread-a-turn-1", turn_count: 1, last_event: "turn_completed", last_message: "Finished", started_at: "2026-03-06T10:00:00.000Z", - last_event_at: "2026-03-06T10:00:05.000Z", tokens: { input_tokens: 30, output_tokens: 20, total_tokens: 50, }, }); + // last_event_at is now formatted in Eastern time (ISO-8601 with Eastern offset) + expect(snapshot.running[0]!.last_event_at).toMatch(/-0[45]:00$/); expect(snapshot.retrying).toEqual([ { issue_id: "issue-3", issue_identifier: "MMM-3", attempt: 2, - due_at: "2026-03-06T10:00:20.000Z", + due_at: formatEasternTimestamp(new Date("2026-03-06T10:00:20.000Z")), error: "no available orchestrator slots", }, ]); @@ -105,6 +265,948 @@ describe("runtime snapshot", () => { tokensRemaining: 700, }); }); + + it("includes cumulative ticket stats in running rows", () => { + const state = createInitialOrchestratorState({ + pollIntervalMs: 30_000, + maxConcurrentAgents: 2, + }); + + // Set up execution history with two completed stages + state.issueExecutionHistory["issue-1"] = [ + { + stageName: 
"investigate", + durationMs: 10_000, + totalTokens: 50_000, + turns: 5, + outcome: "completed", + }, + { + stageName: "implement", + durationMs: 20_000, + totalTokens: 80_000, + turns: 10, + outcome: "completed", + }, + ]; + + // Running entry with 30K tokens accumulated in the current stage + const entry = createRunningEntry({ + issueId: "issue-1", + identifier: "AAA-1", + startedAt: "2026-03-06T10:00:00.000Z", + sessionId: "thread-a-turn-1", + lastCodexEvent: "turn_completed", + lastCodexTimestamp: "2026-03-06T10:00:05.000Z", + lastCodexMessage: "Finished", + turnCount: 3, + codexInputTokens: 10_000, + codexOutputTokens: 5_000, + codexTotalTokens: 15_000, + }); + // Simulate 30K tokens accumulated in the current stage + entry.totalStageTotalTokens = 30_000; + state.running["issue-1"] = entry; + + const snapshot = buildRuntimeSnapshot(state, { + now: new Date("2026-03-06T10:00:10.000Z"), + }); + + expect(snapshot.running).toHaveLength(1); + const row = snapshot.running[0]!; + + // total_pipeline_tokens = 50K (investigate) + 80K (implement) + 30K (current stage) = 160K + expect(row.total_pipeline_tokens).toBe(160_000); + + // execution_history should include the two completed stage records + expect(row.execution_history).toEqual([ + { + stageName: "investigate", + durationMs: 10_000, + totalTokens: 50_000, + turns: 5, + outcome: "completed", + }, + { + stageName: "implement", + durationMs: 20_000, + totalTokens: 80_000, + turns: 10, + outcome: "completed", + }, + ]); + }); + + it("includes turn_history in running rows", () => { + const state = createInitialOrchestratorState({ + pollIntervalMs: 30_000, + maxConcurrentAgents: 2, + }); + + const entry = createRunningEntry({ + issueId: "issue-1", + identifier: "AAA-1", + startedAt: "2026-03-06T10:00:00.000Z", + sessionId: "thread-a-turn-1", + lastCodexEvent: "turn_completed", + lastCodexTimestamp: "2026-03-06T10:00:05.000Z", + lastCodexMessage: "Editing src/foo.ts", + turnCount: 2, + codexInputTokens: 500, + 
codexOutputTokens: 300, + codexTotalTokens: 800, + }); + entry.turnHistory = [ + { + turnNumber: 1, + timestamp: "2026-03-06T10:00:03.000Z", + message: "Checking tests", + inputTokens: 200, + outputTokens: 100, + totalTokens: 300, + cacheReadTokens: 50, + reasoningTokens: 20, + event: "turn_completed", + }, + { + turnNumber: 2, + timestamp: "2026-03-06T10:00:05.000Z", + message: "Editing src/foo.ts", + inputTokens: 300, + outputTokens: 200, + totalTokens: 500, + cacheReadTokens: 80, + reasoningTokens: 30, + event: "turn_completed", + }, + ]; + state.running["issue-1"] = entry; + + const snapshot = buildRuntimeSnapshot(state, { + now: new Date("2026-03-06T10:00:10.000Z"), + }); + + expect(snapshot.running).toHaveLength(1); + expect(snapshot.running[0]!.turn_history).toHaveLength(2); + expect(snapshot.running[0]!.turn_history[0]).toMatchObject({ + turnNumber: 1, + message: "Checking tests", + inputTokens: 200, + cacheReadTokens: 50, + reasoningTokens: 20, + }); + expect(snapshot.running[0]!.turn_history[1]).toMatchObject({ + turnNumber: 2, + message: "Editing src/foo.ts", + }); + }); + + it("populates last_tool_call from the last recentActivity entry", () => { + const state = createInitialOrchestratorState({ + pollIntervalMs: 30_000, + maxConcurrentAgents: 2, + }); + + const entry = createRunningEntry({ + issueId: "issue-1", + identifier: "AAA-1", + startedAt: "2026-03-06T10:00:00.000Z", + sessionId: "thread-a-turn-1", + lastCodexEvent: "turn_completed", + lastCodexTimestamp: "2026-03-06T10:00:05.000Z", + lastCodexMessage: "Editing", + turnCount: 2, + codexInputTokens: 500, + codexOutputTokens: 300, + codexTotalTokens: 800, + }); + entry.recentActivity = [ + { + timestamp: "2026-03-06T10:00:03.000Z", + toolName: "Read", + context: "model.ts", + totalTokens: 100, + }, + { + timestamp: "2026-03-06T10:00:04.000Z", + toolName: "Bash", + context: "npm test", + totalTokens: 200, + }, + { + timestamp: "2026-03-06T10:00:05.000Z", + toolName: "Grep", + context: "pattern", + 
totalTokens: 150, + }, + ]; + state.running["issue-1"] = entry; + + const snapshot = buildRuntimeSnapshot(state, { + now: new Date("2026-03-06T10:00:10.000Z"), + }); + + expect(snapshot.running[0]!.last_tool_call).toBe("Grep pattern"); + }); + + it("sets last_tool_call to tool name only when context is null", () => { + const state = createInitialOrchestratorState({ + pollIntervalMs: 30_000, + maxConcurrentAgents: 2, + }); + + const entry = createRunningEntry({ + issueId: "issue-1", + identifier: "AAA-1", + startedAt: "2026-03-06T10:00:00.000Z", + sessionId: "thread-a-turn-1", + lastCodexEvent: "turn_completed", + lastCodexTimestamp: "2026-03-06T10:00:05.000Z", + lastCodexMessage: "Editing", + turnCount: 1, + codexInputTokens: 500, + codexOutputTokens: 300, + codexTotalTokens: 800, + }); + entry.recentActivity = [ + { + timestamp: "2026-03-06T10:00:05.000Z", + toolName: "Agent", + context: null, + }, + ]; + state.running["issue-1"] = entry; + + const snapshot = buildRuntimeSnapshot(state, { + now: new Date("2026-03-06T10:00:10.000Z"), + }); + + expect(snapshot.running[0]!.last_tool_call).toBe("Agent"); + }); + + it("sets last_tool_call to null when recentActivity is empty", () => { + const state = createInitialOrchestratorState({ + pollIntervalMs: 30_000, + maxConcurrentAgents: 2, + }); + + const entry = createRunningEntry({ + issueId: "issue-1", + identifier: "AAA-1", + startedAt: "2026-03-06T10:00:00.000Z", + sessionId: "thread-a-turn-1", + lastCodexEvent: "turn_completed", + lastCodexTimestamp: "2026-03-06T10:00:05.000Z", + lastCodexMessage: "Starting", + turnCount: 0, + codexInputTokens: 0, + codexOutputTokens: 0, + codexTotalTokens: 0, + }); + state.running["issue-1"] = entry; + + const snapshot = buildRuntimeSnapshot(state, { + now: new Date("2026-03-06T10:00:10.000Z"), + }); + + expect(snapshot.running[0]!.last_tool_call).toBeNull(); + }); + + it("includes full token breakdown with cache and reasoning fields in running rows", () => { + const state = 
createInitialOrchestratorState({ + pollIntervalMs: 30_000, + maxConcurrentAgents: 2, + }); + + const entry = createRunningEntry({ + issueId: "issue-1", + identifier: "AAA-1", + startedAt: "2026-03-06T10:00:00.000Z", + sessionId: "thread-a-turn-1", + lastCodexEvent: "turn_completed", + lastCodexTimestamp: "2026-03-06T10:00:05.000Z", + lastCodexMessage: "Working", + turnCount: 3, + codexInputTokens: 1000, + codexOutputTokens: 500, + codexTotalTokens: 1500, + }); + // Cumulative stage token fields (used by the dashboard snapshot) + entry.totalStageInputTokens = 1000; + entry.totalStageOutputTokens = 500; + entry.totalStageTotalTokens = 1500; + entry.totalStageCacheReadTokens = 200; + entry.totalStageCacheWriteTokens = 150; + entry.codexReasoningTokens = 75; + state.running["issue-1"] = entry; + + const snapshot = buildRuntimeSnapshot(state, { + now: new Date("2026-03-06T10:00:10.000Z"), + }); + + expect(snapshot.running).toHaveLength(1); + const row = snapshot.running[0]!; + expect(row.tokens.input_tokens).toBe(1000); + expect(row.tokens.output_tokens).toBe(500); + expect(row.tokens.total_tokens).toBe(1500); + expect(row.tokens.cache_read_tokens).toBe(200); + expect(row.tokens.cache_write_tokens).toBe(150); + expect(row.tokens.reasoning_tokens).toBe(75); + }); + + it("classifies health as green when session is active and token burn is normal", () => { + const state = createInitialOrchestratorState({ + pollIntervalMs: 30_000, + maxConcurrentAgents: 2, + }); + const now = new Date("2026-03-21T10:05:00.000Z"); + const recentTimestamp = new Date(now.getTime() - 30_000).toISOString(); // 30s ago + const entry = createRunningEntry({ + issueId: "issue-1", + identifier: "ABC-1", + startedAt: new Date(now.getTime() - 60_000).toISOString(), + sessionId: "thread-a-turn-1", + lastCodexEvent: "turn_completed", + lastCodexTimestamp: recentTimestamp, + lastCodexMessage: "Working", + turnCount: 5, + codexInputTokens: 10_000, + codexOutputTokens: 5_000, + codexTotalTokens: 15_000, + 
}); + entry.totalStageTotalTokens = 15_000; + state.running["issue-1"] = entry; + + const snapshot = buildRuntimeSnapshot(state, { now }); + + expect(snapshot.running[0]!.health).toBe("green"); + expect(snapshot.running[0]!.health_reason).toBeNull(); + }); + + it("classifies health as red when session exceeds 80% of stage stall threshold", () => { + const state = createInitialOrchestratorState({ + pollIntervalMs: 30_000, + maxConcurrentAgents: 2, + }); + const now = new Date("2026-03-21T10:05:00.000Z"); + // merge stage has 300s threshold; 80% = 240s. 250s > 240s → red + const stalledTimestamp = new Date(now.getTime() - 250_000).toISOString(); + const entry = createRunningEntry({ + issueId: "issue-1", + identifier: "ABC-1", + startedAt: new Date(now.getTime() - 300_000).toISOString(), + sessionId: "thread-a-turn-1", + lastCodexEvent: "turn_completed", + lastCodexTimestamp: stalledTimestamp, + lastCodexMessage: "Working", + turnCount: 2, + codexInputTokens: 1_000, + codexOutputTokens: 500, + codexTotalTokens: 1_500, + }); + entry.totalStageTotalTokens = 1_500; + state.running["issue-1"] = entry; + state.issueStages["issue-1"] = "merge"; + + const snapshot = buildRuntimeSnapshot(state, { now }); + + expect(snapshot.running[0]!.health).toBe("red"); + expect(snapshot.running[0]!.health_reason).toContain("stalled"); + expect(snapshot.running[0]!.health_reason).toContain("merge stage"); + expect(snapshot.running[0]!.health_reason).toContain("threshold 300s"); + }); + + it("classifies health as yellow when tokens_per_turn exceeds 20000", () => { + const state = createInitialOrchestratorState({ + pollIntervalMs: 30_000, + maxConcurrentAgents: 2, + }); + const now = new Date("2026-03-21T10:05:00.000Z"); + const recentTimestamp = new Date(now.getTime() - 10_000).toISOString(); // 10s ago (not stalled) + const entry = createRunningEntry({ + issueId: "issue-1", + identifier: "ABC-1", + startedAt: new Date(now.getTime() - 60_000).toISOString(), + sessionId: "thread-a-turn-1", + 
lastCodexEvent: "turn_completed", + lastCodexTimestamp: recentTimestamp, + lastCodexMessage: "Working", + turnCount: 2, + codexInputTokens: 30_000, + codexOutputTokens: 12_000, + codexTotalTokens: 42_001, + }); + entry.totalStageTotalTokens = 42_001; + state.running["issue-1"] = entry; + + const snapshot = buildRuntimeSnapshot(state, { now }); + + expect(snapshot.running[0]!.health).toBe("yellow"); + expect(snapshot.running[0]!.health_reason).toContain("token"); + }); + + it("tokens in running row reflect cumulative stage totals, not per-turn absolute counters", () => { + const state = createInitialOrchestratorState({ + pollIntervalMs: 30_000, + maxConcurrentAgents: 2, + }); + + // Simulate a session where codex absolute counters are small (e.g. start of a new turn) + // but the stage has already accumulated significant tokens across prior turns + const entry = createRunningEntry({ + issueId: "issue-1", + identifier: "AAA-1", + startedAt: "2026-03-06T10:00:00.000Z", + sessionId: "thread-a-turn-1", + lastCodexEvent: "session_started", + lastCodexTimestamp: "2026-03-06T10:00:05.000Z", + lastCodexMessage: "Starting", + turnCount: 5, + codexInputTokens: 0, // Absolute counters reset at turn boundary + codexOutputTokens: 0, + codexTotalTokens: 0, + }); + // Cumulative stage totals have been accumulating across 4 completed turns + entry.totalStageInputTokens = 40_000; + entry.totalStageOutputTokens = 20_000; + entry.totalStageTotalTokens = 60_000; + entry.totalStageCacheReadTokens = 5_000; + entry.totalStageCacheWriteTokens = 2_000; + entry.codexReasoningTokens = 1_000; // accumulated via += + state.running["issue-1"] = entry; + + const snapshot = buildRuntimeSnapshot(state, { + now: new Date("2026-03-06T10:00:10.000Z"), + }); + + const row = snapshot.running[0]!; + // tokens should show cumulative stage values, not the zero absolute counters + expect(row.tokens.input_tokens).toBe(40_000); + expect(row.tokens.output_tokens).toBe(20_000); + 
expect(row.tokens.total_tokens).toBe(60_000); + expect(row.tokens.cache_read_tokens).toBe(5_000); + expect(row.tokens.cache_write_tokens).toBe(2_000); + expect(row.tokens.reasoning_tokens).toBe(1_000); + }); + + it("includes first_dispatched_at from issueFirstDispatchedAt when set", () => { + const state = createInitialOrchestratorState({ + pollIntervalMs: 30_000, + maxConcurrentAgents: 2, + }); + state.running["issue-1"] = createRunningEntry({ + issueId: "issue-1", + identifier: "ABC-1", + startedAt: "2026-03-06T10:00:00.000Z", + sessionId: "thread-a-turn-1", + lastCodexEvent: "turn_completed", + lastCodexTimestamp: "2026-03-06T10:00:05.000Z", + lastCodexMessage: "Working", + turnCount: 1, + codexInputTokens: 10, + codexOutputTokens: 5, + codexTotalTokens: 15, + }); + state.issueFirstDispatchedAt["issue-1"] = "2026-01-15T08:00:00.000Z"; + + const snapshot = buildRuntimeSnapshot(state, { + now: new Date("2026-03-06T10:00:10.000Z"), + }); + + expect(snapshot.running).toHaveLength(1); + expect(snapshot.running[0]!.first_dispatched_at).toBe( + "2026-01-15T08:00:00.000Z", + ); + }); + + it("falls back to startedAt for first_dispatched_at when issueFirstDispatchedAt is not set", () => { + const state = createInitialOrchestratorState({ + pollIntervalMs: 30_000, + maxConcurrentAgents: 2, + }); + state.running["issue-1"] = createRunningEntry({ + issueId: "issue-1", + identifier: "ABC-1", + startedAt: "2026-03-06T10:00:00.000Z", + sessionId: "thread-a-turn-1", + lastCodexEvent: "turn_completed", + lastCodexTimestamp: "2026-03-06T10:00:05.000Z", + lastCodexMessage: "Working", + turnCount: 1, + codexInputTokens: 10, + codexOutputTokens: 5, + codexTotalTokens: 15, + }); + + const snapshot = buildRuntimeSnapshot(state, { + now: new Date("2026-03-06T10:00:10.000Z"), + }); + + expect(snapshot.running).toHaveLength(1); + expect(snapshot.running[0]!.first_dispatched_at).toBe( + "2026-03-06T10:00:00.000Z", + ); + }); + + it("returns zero total_pipeline_tokens and empty 
execution_history when no history exists", () => { + const state = createInitialOrchestratorState({ + pollIntervalMs: 30_000, + maxConcurrentAgents: 2, + }); + + state.running["issue-1"] = createRunningEntry({ + issueId: "issue-1", + identifier: "AAA-1", + startedAt: "2026-03-06T10:00:00.000Z", + sessionId: "thread-a-turn-1", + lastCodexEvent: null, + lastCodexTimestamp: null, + lastCodexMessage: null, + turnCount: 0, + codexInputTokens: 0, + codexOutputTokens: 0, + codexTotalTokens: 0, + }); + + const snapshot = buildRuntimeSnapshot(state, { + now: new Date("2026-03-06T10:00:10.000Z"), + }); + + expect(snapshot.running[0]!.total_pipeline_tokens).toBe(0); + expect(snapshot.running[0]!.execution_history).toEqual([]); + }); + + it("sets issue_title from entry.issue.title", () => { + const state = createInitialOrchestratorState({ + pollIntervalMs: 30_000, + maxConcurrentAgents: 2, + }); + const entry = createRunningEntry({ + issueId: "issue-1", + identifier: "ABC-1", + startedAt: "2026-03-06T10:00:00.000Z", + sessionId: "thread-a-turn-1", + lastCodexEvent: null, + lastCodexTimestamp: null, + lastCodexMessage: null, + turnCount: 0, + codexInputTokens: 0, + codexOutputTokens: 0, + codexTotalTokens: 0, + }); + entry.issue.title = "Add login page"; + state.running["issue-1"] = entry; + + const snapshot = buildRuntimeSnapshot(state, { + now: new Date("2026-03-06T10:00:10.000Z"), + }); + + expect(snapshot.running[0]!.issue_title).toBe("Add login page"); + }); + + it("formats last_event_at as Eastern time instead of raw UTC", () => { + const state = createInitialOrchestratorState({ + pollIntervalMs: 30_000, + maxConcurrentAgents: 2, + }); + state.running["issue-1"] = createRunningEntry({ + issueId: "issue-1", + identifier: "ABC-1", + startedAt: "2026-03-06T10:00:00.000Z", + sessionId: "thread-a-turn-1", + lastCodexEvent: "turn_completed", + lastCodexTimestamp: "2026-03-06T15:30:45.000Z", + lastCodexMessage: "Working", + turnCount: 1, + codexInputTokens: 10, + 
codexOutputTokens: 5, + codexTotalTokens: 15, + }); + + const snapshot = buildRuntimeSnapshot(state, { + now: new Date("2026-03-06T15:31:00.000Z"), + }); + + const lastEventAt = snapshot.running[0]!.last_event_at!; + // Should be formatted in Eastern time, not raw UTC (Z suffix) + expect(lastEventAt).not.toMatch(/Z$/); + // Should contain Eastern timezone offset (-05:00 for EST) + expect(lastEventAt).toMatch(/-0[45]:00$/); + // 15:30:45 UTC = 10:30:45 ET (EST) + expect(lastEventAt).toContain("10:30:45"); + }); + + it("returns null last_event_at when lastCodexTimestamp is null", () => { + const state = createInitialOrchestratorState({ + pollIntervalMs: 30_000, + maxConcurrentAgents: 2, + }); + state.running["issue-1"] = createRunningEntry({ + issueId: "issue-1", + identifier: "ABC-1", + startedAt: "2026-03-06T10:00:00.000Z", + sessionId: "thread-a-turn-1", + lastCodexEvent: null, + lastCodexTimestamp: null, + lastCodexMessage: null, + turnCount: 0, + codexInputTokens: 0, + codexOutputTokens: 0, + codexTotalTokens: 0, + }); + + const snapshot = buildRuntimeSnapshot(state, { + now: new Date("2026-03-06T10:00:10.000Z"), + }); + + expect(snapshot.running[0]!.last_event_at).toBeNull(); + }); + + it("counts completed and failed issues from state Sets", () => { + const state = createInitialOrchestratorState({ + pollIntervalMs: 30_000, + maxConcurrentAgents: 2, + }); + state.completed.add("done-1"); + state.completed.add("done-2"); + state.failed.add("fail-1"); + + const snapshot = buildRuntimeSnapshot(state, { + now: new Date("2026-03-06T10:00:10.000Z"), + }); + + expect(snapshot.counts.completed).toBe(2); + expect(snapshot.counts.failed).toBe(1); + }); + + it("returns zero completed/failed when no execution history exists", () => { + const state = createInitialOrchestratorState({ + pollIntervalMs: 30_000, + maxConcurrentAgents: 2, + }); + + const snapshot = buildRuntimeSnapshot(state, { + now: new Date("2026-03-06T10:00:10.000Z"), + }); + + 
expect(snapshot.counts.completed).toBe(0); + expect(snapshot.counts.failed).toBe(0); + }); + + it("computes pipeline total time from first_dispatched_at for multi-stage issues", () => { + const state = createInitialOrchestratorState({ + pollIntervalMs: 30_000, + maxConcurrentAgents: 2, + }); + const now = new Date("2026-03-06T11:00:00.000Z"); + // First dispatched 1 hour ago + state.issueFirstDispatchedAt["issue-1"] = "2026-03-06T10:00:00.000Z"; + state.issueExecutionHistory["issue-1"] = [ + { + stageName: "investigate", + durationMs: 600_000, + totalTokens: 10_000, + turns: 5, + outcome: "success", + }, + ]; + const entry = createRunningEntry({ + issueId: "issue-1", + identifier: "ABC-1", + startedAt: "2026-03-06T10:30:00.000Z", // current stage started 30min ago + sessionId: "thread-a-turn-1", + lastCodexEvent: "turn_completed", + lastCodexTimestamp: "2026-03-06T10:59:50.000Z", + lastCodexMessage: "Working", + turnCount: 3, + codexInputTokens: 10, + codexOutputTokens: 5, + codexTotalTokens: 15, + }); + state.running["issue-1"] = entry; + + const snapshot = buildRuntimeSnapshot(state, { now }); + + // first_dispatched_at should be 1 hour before now + expect(snapshot.running[0]!.first_dispatched_at).toBe( + "2026-03-06T10:00:00.000Z", + ); + // Pipeline column uses first_dispatched_at for total wall-clock time + // The dashboard formats elapsed from first_dispatched_at to generated_at + }); + + it("uses started_at as pipeline total time for single-stage issues", () => { + const state = createInitialOrchestratorState({ + pollIntervalMs: 30_000, + maxConcurrentAgents: 2, + }); + const now = new Date("2026-03-06T10:05:00.000Z"); + const entry = createRunningEntry({ + issueId: "issue-1", + identifier: "ABC-1", + startedAt: "2026-03-06T10:00:00.000Z", + sessionId: "thread-a-turn-1", + lastCodexEvent: "turn_completed", + lastCodexTimestamp: "2026-03-06T10:04:50.000Z", + lastCodexMessage: "Working", + turnCount: 3, + codexInputTokens: 10, + codexOutputTokens: 5, + 
codexTotalTokens: 15, + }); + state.running["issue-1"] = entry; + + const snapshot = buildRuntimeSnapshot(state, { now }); + + // For single-stage, first_dispatched_at falls back to started_at + expect(snapshot.running[0]!.first_dispatched_at).toBe( + "2026-03-06T10:00:00.000Z", + ); + }); +}); + +describe("getStallThreshold", () => { + it("returns stage-specific thresholds for known stages", () => { + expect(getStallThreshold("investigate")).toBe(600); + expect(getStallThreshold("implement")).toBe(480); + expect(getStallThreshold("review")).toBe(600); + expect(getStallThreshold("merge")).toBe(300); + }); + + it("returns default threshold for unknown stages", () => { + expect(getStallThreshold("custom-stage")).toBe(480); + }); + + it("returns default threshold for null stage", () => { + expect(getStallThreshold(null)).toBe(480); + }); +}); + +describe("stage-aware health classification", () => { + it("classifies health as yellow at 50%+ of investigate stage threshold", () => { + const state = createInitialOrchestratorState({ + pollIntervalMs: 30_000, + maxConcurrentAgents: 2, + }); + const now = new Date("2026-03-21T10:05:00.000Z"); + // investigate threshold=600s; 50% = 300s. 
310s > 300s → yellow + const timestamp = new Date(now.getTime() - 310_000).toISOString(); + const entry = createRunningEntry({ + issueId: "issue-1", + identifier: "ABC-1", + startedAt: new Date(now.getTime() - 600_000).toISOString(), + sessionId: "thread-a-turn-1", + lastCodexEvent: "turn_completed", + lastCodexTimestamp: timestamp, + lastCodexMessage: "Exploring codebase", + turnCount: 3, + codexInputTokens: 1_000, + codexOutputTokens: 500, + codexTotalTokens: 1_500, + }); + entry.totalStageTotalTokens = 1_500; + state.running["issue-1"] = entry; + state.issueStages["issue-1"] = "investigate"; + + const snapshot = buildRuntimeSnapshot(state, { now }); + + expect(snapshot.running[0]!.health).toBe("yellow"); + expect(snapshot.running[0]!.health_reason).toContain("investigate stage"); + expect(snapshot.running[0]!.health_reason).toContain("threshold 600s"); + }); + + it("classifies health as red at 80%+ of investigate stage threshold", () => { + const state = createInitialOrchestratorState({ + pollIntervalMs: 30_000, + maxConcurrentAgents: 2, + }); + const now = new Date("2026-03-21T10:05:00.000Z"); + // investigate threshold=600s; 80% = 480s. 
500s > 480s → red + const timestamp = new Date(now.getTime() - 500_000).toISOString(); + const entry = createRunningEntry({ + issueId: "issue-1", + identifier: "ABC-1", + startedAt: new Date(now.getTime() - 600_000).toISOString(), + sessionId: "thread-a-turn-1", + lastCodexEvent: "turn_completed", + lastCodexTimestamp: timestamp, + lastCodexMessage: "Exploring codebase", + turnCount: 3, + codexInputTokens: 1_000, + codexOutputTokens: 500, + codexTotalTokens: 1_500, + }); + entry.totalStageTotalTokens = 1_500; + state.running["issue-1"] = entry; + state.issueStages["issue-1"] = "investigate"; + + const snapshot = buildRuntimeSnapshot(state, { now }); + + expect(snapshot.running[0]!.health).toBe("red"); + expect(snapshot.running[0]!.health_reason).toContain("stalled"); + expect(snapshot.running[0]!.health_reason).toContain("investigate stage"); + }); + + it("classifies health as green within 50% of implement stage threshold", () => { + const state = createInitialOrchestratorState({ + pollIntervalMs: 30_000, + maxConcurrentAgents: 2, + }); + const now = new Date("2026-03-21T10:05:00.000Z"); + // implement threshold=480s; 50% = 240s. 
200s < 240s → green + const timestamp = new Date(now.getTime() - 200_000).toISOString(); + const entry = createRunningEntry({ + issueId: "issue-1", + identifier: "ABC-1", + startedAt: new Date(now.getTime() - 600_000).toISOString(), + sessionId: "thread-a-turn-1", + lastCodexEvent: "turn_completed", + lastCodexTimestamp: timestamp, + lastCodexMessage: "Writing code", + turnCount: 3, + codexInputTokens: 1_000, + codexOutputTokens: 500, + codexTotalTokens: 1_500, + }); + entry.totalStageTotalTokens = 1_500; + state.running["issue-1"] = entry; + state.issueStages["issue-1"] = "implement"; + + const snapshot = buildRuntimeSnapshot(state, { now }); + + expect(snapshot.running[0]!.health).toBe("green"); + expect(snapshot.running[0]!.health_reason).toBeNull(); + }); + + it("uses default threshold for unknown stage names", () => { + const state = createInitialOrchestratorState({ + pollIntervalMs: 30_000, + maxConcurrentAgents: 2, + }); + const now = new Date("2026-03-21T10:05:00.000Z"); + // default threshold=480s; 80% = 384s. 
400s > 384s → red + const timestamp = new Date(now.getTime() - 400_000).toISOString(); + const entry = createRunningEntry({ + issueId: "issue-1", + identifier: "ABC-1", + startedAt: new Date(now.getTime() - 600_000).toISOString(), + sessionId: "thread-a-turn-1", + lastCodexEvent: "turn_completed", + lastCodexTimestamp: timestamp, + lastCodexMessage: "Working", + turnCount: 3, + codexInputTokens: 1_000, + codexOutputTokens: 500, + codexTotalTokens: 1_500, + }); + entry.totalStageTotalTokens = 1_500; + state.running["issue-1"] = entry; + state.issueStages["issue-1"] = "custom-stage"; + + const snapshot = buildRuntimeSnapshot(state, { now }); + + expect(snapshot.running[0]!.health).toBe("red"); + expect(snapshot.running[0]!.health_reason).toContain("custom-stage stage"); + expect(snapshot.running[0]!.health_reason).toContain("threshold 480s"); + }); + + it("uses default threshold when no stage is set", () => { + const state = createInitialOrchestratorState({ + pollIntervalMs: 30_000, + maxConcurrentAgents: 2, + }); + const now = new Date("2026-03-21T10:05:00.000Z"); + // default threshold=480s; 50% = 240s. 
250s > 240s → yellow + const timestamp = new Date(now.getTime() - 250_000).toISOString(); + const entry = createRunningEntry({ + issueId: "issue-1", + identifier: "ABC-1", + startedAt: new Date(now.getTime() - 600_000).toISOString(), + sessionId: "thread-a-turn-1", + lastCodexEvent: "turn_completed", + lastCodexTimestamp: timestamp, + lastCodexMessage: "Working", + turnCount: 3, + codexInputTokens: 1_000, + codexOutputTokens: 500, + codexTotalTokens: 1_500, + }); + entry.totalStageTotalTokens = 1_500; + state.running["issue-1"] = entry; + // No issueStages entry → null stage + + const snapshot = buildRuntimeSnapshot(state, { now }); + + expect(snapshot.running[0]!.health).toBe("yellow"); + expect(snapshot.running[0]!.health_reason).toContain("unknown stage"); + expect(snapshot.running[0]!.health_reason).toContain("threshold 480s"); + }); + + it("health reason includes seconds of inactivity for yellow", () => { + const state = createInitialOrchestratorState({ + pollIntervalMs: 30_000, + maxConcurrentAgents: 2, + }); + const now = new Date("2026-03-21T10:05:00.000Z"); + // review threshold=600s; 50% = 300s. 
312s > 300s → yellow + const timestamp = new Date(now.getTime() - 312_000).toISOString(); + const entry = createRunningEntry({ + issueId: "issue-1", + identifier: "ABC-1", + startedAt: new Date(now.getTime() - 600_000).toISOString(), + sessionId: "thread-a-turn-1", + lastCodexEvent: "turn_completed", + lastCodexTimestamp: timestamp, + lastCodexMessage: "Reviewing", + turnCount: 3, + codexInputTokens: 1_000, + codexOutputTokens: 500, + codexTotalTokens: 1_500, + }); + entry.totalStageTotalTokens = 1_500; + state.running["issue-1"] = entry; + state.issueStages["issue-1"] = "review"; + + const snapshot = buildRuntimeSnapshot(state, { now }); + + expect(snapshot.running[0]!.health).toBe("yellow"); + expect(snapshot.running[0]!.health_reason).toBe( + "slow: no activity for 312s (review stage, threshold 600s)", + ); + }); + + it("STAGE_STALL_THRESHOLDS has expected values", () => { + expect(STAGE_STALL_THRESHOLDS).toEqual({ + investigate: 600, + implement: 480, + review: 600, + merge: 300, + }); + }); +}); + +describe("formatEasternTimestamp", () => { + it("formats a UTC date to Eastern time (ISO-8601 with EST offset)", () => { + // 2026-03-06 is in EST (UTC-5) + const result = formatEasternTimestamp(new Date("2026-03-06T15:30:45.000Z")); + // 15:30:45 UTC = 10:30:45 Eastern (EST = UTC-5) + expect(result).toContain("10:30:45"); + expect(result).toContain("-05:00"); + expect(result).not.toMatch(/Z$/); + }); + + it("handles EDT dates correctly", () => { + // 2026-07-15 is in EDT (UTC-4) + const result = formatEasternTimestamp(new Date("2026-07-15T18:00:00.000Z")); + // 18:00:00 UTC = 14:00:00 Eastern (EDT = UTC-4) + expect(result).toContain("14:00:00"); + expect(result).toContain("-04:00"); + expect(result).not.toMatch(/Z$/); + }); + + it("returns n/a for invalid dates", () => { + expect(formatEasternTimestamp(new Date("invalid"))).toBe("n/a"); + }); }); function createRunningEntry(input: { @@ -112,9 +1214,9 @@ function createRunningEntry(input: { identifier: string; 
startedAt: string; sessionId: string; - lastCodexEvent: string; - lastCodexTimestamp: string; - lastCodexMessage: string; + lastCodexEvent: string | null; + lastCodexTimestamp: string | null; + lastCodexMessage: string | null; turnCount: number; codexInputTokens: number; codexOutputTokens: number; diff --git a/tests/logging/session-metrics.test.ts b/tests/logging/session-metrics.test.ts index 01ebae47..73cf8517 100644 --- a/tests/logging/session-metrics.test.ts +++ b/tests/logging/session-metrics.test.ts @@ -3,12 +3,16 @@ import { describe, expect, it } from "vitest"; import type { CodexClientEvent } from "../../src/codex/app-server-client.js"; import { type RunningEntry, + type TurnHistoryEntry, createEmptyLiveSession, createInitialOrchestratorState, } from "../../src/domain/model.js"; import { addEndedSessionRuntime, + addPipelineActivity, applyCodexEventToOrchestratorState, + applyCodexEventToSession, + buildActivityContext, getAggregateSecondsRunning, summarizeCodexEvent, } from "../../src/logging/session-metrics.js"; @@ -64,6 +68,10 @@ describe("session metrics", () => { inputTokens: 14, outputTokens: 9, totalTokens: 23, + cacheReadTokens: 0, + cacheWriteTokens: 0, + noCacheTokens: 0, + reasoningTokens: 0, secondsRunning: 0, }); expect(state.codexRateLimits).toEqual({ @@ -94,6 +102,566 @@ describe("session metrics", () => { expect(secondsRunning).toBe(15.75); }); + it("accumulates cache and reasoning token details when present", () => { + const state = createInitialOrchestratorState({ + pollIntervalMs: 30_000, + maxConcurrentAgents: 3, + }); + const running = createRunningEntry(); + + const eventWithDetails = createEvent("turn_completed", { + usage: { + inputTokens: 20, + outputTokens: 10, + totalTokens: 30, + cacheReadTokens: 5, + cacheWriteTokens: 3, + noCacheTokens: 12, + reasoningTokens: 4, + }, + }); + + applyCodexEventToOrchestratorState(state, running, eventWithDetails); + + expect(running.codexCacheReadTokens).toBe(5); + 
expect(running.codexCacheWriteTokens).toBe(3); + expect(running.codexNoCacheTokens).toBe(12); + expect(running.codexReasoningTokens).toBe(4); + expect(state.codexTotals.cacheReadTokens).toBe(5); + expect(state.codexTotals.cacheWriteTokens).toBe(3); + expect(state.codexTotals.noCacheTokens).toBe(12); + expect(state.codexTotals.reasoningTokens).toBe(4); + }); + + it("leaves detail token counts at 0 when usage has no detail fields", () => { + const state = createInitialOrchestratorState({ + pollIntervalMs: 30_000, + maxConcurrentAgents: 3, + }); + const running = createRunningEntry(); + + const eventWithoutDetails = createEvent("turn_completed", { + usage: { + inputTokens: 10, + outputTokens: 5, + totalTokens: 15, + }, + }); + + applyCodexEventToOrchestratorState(state, running, eventWithoutDetails); + + expect(running.codexCacheReadTokens).toBe(0); + expect(running.codexCacheWriteTokens).toBe(0); + expect(running.codexNoCacheTokens).toBe(0); + expect(running.codexReasoningTokens).toBe(0); + expect(state.codexTotals.cacheReadTokens).toBe(0); + expect(state.codexTotals.cacheWriteTokens).toBe(0); + expect(state.codexTotals.noCacheTokens).toBe(0); + expect(state.codexTotals.reasoningTokens).toBe(0); + }); + + it("accumulates detail tokens across multiple events", () => { + const state = createInitialOrchestratorState({ + pollIntervalMs: 30_000, + maxConcurrentAgents: 3, + }); + const running = createRunningEntry(); + + const firstEvent = createEvent("notification", { + usage: { + inputTokens: 10, + outputTokens: 5, + totalTokens: 15, + cacheReadTokens: 3, + reasoningTokens: 2, + }, + }); + const secondEvent = createEvent("turn_completed", { + usage: { + inputTokens: 20, + outputTokens: 10, + totalTokens: 30, + cacheReadTokens: 7, + reasoningTokens: 6, + }, + }); + + applyCodexEventToOrchestratorState(state, running, firstEvent); + applyCodexEventToOrchestratorState(state, running, secondEvent); + + // Detail tokens are accumulated additively (not absolute like 
input/output/total) + expect(running.codexCacheReadTokens).toBe(10); + expect(running.codexReasoningTokens).toBe(8); + expect(state.codexTotals.cacheReadTokens).toBe(10); + expect(state.codexTotals.reasoningTokens).toBe(8); + }); + + it("returns zero deltas for detail tokens when no usage on event", () => { + const state = createInitialOrchestratorState({ + pollIntervalMs: 30_000, + maxConcurrentAgents: 3, + }); + const running = createRunningEntry(); + + const noUsageEvent = createEvent("notification"); + const result = applyCodexEventToOrchestratorState( + state, + running, + noUsageEvent, + ); + + expect(result.cacheReadTokensDelta).toBe(0); + expect(result.cacheWriteTokensDelta).toBe(0); + expect(result.noCacheTokensDelta).toBe(0); + expect(result.reasoningTokensDelta).toBe(0); + }); + + it("accumulates codexTotalInputTokens and codexTotalOutputTokens across multiple turns", () => { + const state = createInitialOrchestratorState({ + pollIntervalMs: 30_000, + maxConcurrentAgents: 3, + }); + const running = createRunningEntry(); + + // Turn 1 starts: session_started resets lastReported counters to 0 + const turn1Start = createEvent("session_started", { + sessionId: "thread-1-turn-1", + threadId: "thread-1", + turnId: "turn-1", + }); + applyCodexEventToOrchestratorState(state, running, turn1Start); + + // Turn 1 completes: 100 input, 40 output + const turn1End = createEvent("turn_completed", { + usage: { + inputTokens: 100, + outputTokens: 40, + totalTokens: 140, + }, + }); + applyCodexEventToOrchestratorState(state, running, turn1End); + + expect(running.codexTotalInputTokens).toBe(100); + expect(running.codexTotalOutputTokens).toBe(40); + + // Turn 2 starts: session_started resets lastReported counters to 0 + const turn2Start = createEvent("session_started", { + sessionId: "thread-1-turn-2", + threadId: "thread-1", + turnId: "turn-2", + }); + applyCodexEventToOrchestratorState(state, running, turn2Start); + + // Turn 2 completes: 120 input, 60 output (counter 
resets to 0 each turn) + const turn2End = createEvent("turn_completed", { + usage: { + inputTokens: 120, + outputTokens: 60, + totalTokens: 180, + }, + }); + applyCodexEventToOrchestratorState(state, running, turn2End); + + // codexTotalInputTokens/OutputTokens should sum both turns: 100+120=220, 40+60=100 + expect(running.codexTotalInputTokens).toBe(220); + expect(running.codexTotalOutputTokens).toBe(100); + + // codexInputTokens still reflects the last absolute value (current turn only) + expect(running.codexInputTokens).toBe(120); + expect(running.codexOutputTokens).toBe(60); + }); + + it("single-turn stage: totalStage fields match the single turn values", () => { + const running = createRunningEntry(); + + const event = createEvent("turn_completed", { + usage: { + inputTokens: 10, + outputTokens: 5, + totalTokens: 15, + cacheReadTokens: 3, + cacheWriteTokens: 2, + }, + }); + + const state = createInitialOrchestratorState({ + pollIntervalMs: 30_000, + maxConcurrentAgents: 3, + }); + applyCodexEventToOrchestratorState(state, running, event); + + expect(running.totalStageInputTokens).toBe(10); + expect(running.totalStageOutputTokens).toBe(5); + expect(running.totalStageTotalTokens).toBe(15); + expect(running.totalStageCacheReadTokens).toBe(3); + expect(running.totalStageCacheWriteTokens).toBe(2); + }); + + it("multi-turn stage: totalStage fields equal sum of all turn deltas", () => { + const running = createRunningEntry(); + const state = createInitialOrchestratorState({ + pollIntervalMs: 30_000, + maxConcurrentAgents: 3, + }); + + // First turn: absolute counters start from 0 + const firstTurn = createEvent("notification", { + usage: { + inputTokens: 10, + outputTokens: 4, + totalTokens: 14, + cacheReadTokens: 2, + cacheWriteTokens: 1, + }, + }); + // Second turn: absolute counters increase + const secondTurn = createEvent("turn_completed", { + usage: { + inputTokens: 20, + outputTokens: 9, + totalTokens: 29, + cacheReadTokens: 5, + cacheWriteTokens: 3, + }, + 
}); + + applyCodexEventToOrchestratorState(state, running, firstTurn); + applyCodexEventToOrchestratorState(state, running, secondTurn); + + // inputTokensDelta for first = 10, for second = 10 (20-10), total = 20 + expect(running.totalStageInputTokens).toBe(20); + // outputTokensDelta for first = 4, for second = 5 (9-4), total = 9 + expect(running.totalStageOutputTokens).toBe(9); + // totalTokensDelta for first = 14, for second = 15 (29-14), total = 29 + expect(running.totalStageTotalTokens).toBe(29); + // cacheReadTokens accumulated additively: 2 + 5 = 7 + expect(running.totalStageCacheReadTokens).toBe(7); + // cacheWriteTokens accumulated additively: 1 + 3 = 4 + expect(running.totalStageCacheWriteTokens).toBe(4); + }); + + it("zero-turn stage: all totalStage accumulator fields are 0", () => { + const running = createRunningEntry(); + + expect(running.totalStageInputTokens).toBe(0); + expect(running.totalStageOutputTokens).toBe(0); + expect(running.totalStageTotalTokens).toBe(0); + expect(running.totalStageCacheReadTokens).toBe(0); + expect(running.totalStageCacheWriteTokens).toBe(0); + }); + + it("turn history ring buffer captures turn summaries", () => { + const session = createEmptyLiveSession(); + + const event1 = createEvent("session_started", { + sessionId: "thread-1-turn-1", + threadId: "thread-1", + turnId: "turn-1", + timestamp: "2026-03-06T10:00:01.000Z", + }); + const event2 = createEvent("session_started", { + sessionId: "thread-1-turn-2", + threadId: "thread-1", + turnId: "turn-2", + timestamp: "2026-03-06T10:00:02.000Z", + }); + const event3 = createEvent("session_started", { + sessionId: "thread-1-turn-3", + threadId: "thread-1", + turnId: "turn-3", + timestamp: "2026-03-06T10:00:03.000Z", + }); + + applyCodexEventToSession(session, event1); + applyCodexEventToSession(session, event2); + applyCodexEventToSession(session, event3); + + // Turns 1 and 2 are complete; turn 3 is in progress + expect(session.turnHistory).toHaveLength(2); + + const entry1 
= session.turnHistory[0] as TurnHistoryEntry; + const entry2 = session.turnHistory[1] as TurnHistoryEntry; + + // Each entry must have all required fields + expect(entry1).toHaveProperty("turnNumber"); + expect(entry1).toHaveProperty("timestamp"); + expect(entry1).toHaveProperty("message"); + expect(entry1).toHaveProperty("inputTokens"); + expect(entry1).toHaveProperty("outputTokens"); + expect(entry1).toHaveProperty("totalTokens"); + expect(entry1).toHaveProperty("cacheReadTokens"); + expect(entry1).toHaveProperty("reasoningTokens"); + expect(entry1).toHaveProperty("event"); + + expect(entry1.turnNumber).toBe(1); + expect(entry1.timestamp).toBe("2026-03-06T10:00:02.000Z"); + expect(entry1.inputTokens).toBe(0); + expect(entry1.outputTokens).toBe(0); + expect(entry1.totalTokens).toBe(0); + expect(entry1.cacheReadTokens).toBe(0); + expect(entry1.reasoningTokens).toBe(0); + expect(entry1.event).toBe("session_started"); + + expect(entry2.turnNumber).toBe(2); + expect(entry2.timestamp).toBe("2026-03-06T10:00:03.000Z"); + }); + + it("turn history ring buffer caps at 50 entries", () => { + const session = createEmptyLiveSession(); + + // Process 55 session_started events + for (let i = 1; i <= 55; i++) { + applyCodexEventToSession( + session, + createEvent("session_started", { + sessionId: `thread-1-turn-${i}`, + threadId: "thread-1", + turnId: `turn-${i}`, + timestamp: `2026-03-06T10:00:${String(i).padStart(2, "0")}.000Z`, + }), + ); + } + + // After 55 session_started events: 54 entries would exist before capping + // Capped at 50 → oldest 4 evicted + expect(session.turnHistory).toHaveLength(50); + + // Oldest 4 entries (turnNumbers 1-4) should have been evicted + const firstEntry = session.turnHistory[0] as TurnHistoryEntry; + expect(firstEntry.turnNumber).toBe(5); + + // Most recent retained entry is turn 54 (turn 55 is in progress) + const lastEntry = session.turnHistory[49] as TurnHistoryEntry; + expect(lastEntry.turnNumber).toBe(54); + }); + + describe("broadened 
recentActivity tracking", () => { + it("tracks unsupported_tool_call events with tool name and context", () => { + const session = createEmptyLiveSession(); + const event = createEvent("unsupported_tool_call", { + raw: { + params: { + toolName: "linear_graphql", + input: { query: "{ viewer { id } }" }, + }, + }, + }); + + applyCodexEventToSession(session, event); + + expect(session.recentActivity).toHaveLength(1); + expect(session.recentActivity[0]!.toolName).toBe("linear_graphql"); + expect(session.recentActivity[0]!.context).toBe("{ viewer { id } }"); + }); + + it("tracks turn_completed events with token count", () => { + const session = createEmptyLiveSession(); + const event = createEvent("turn_completed", { + usage: { + inputTokens: 500, + outputTokens: 200, + totalTokens: 700, + }, + }); + + applyCodexEventToSession(session, event); + + expect(session.recentActivity).toHaveLength(1); + expect(session.recentActivity[0]!.toolName).toBe("Turn completed"); + expect(session.recentActivity[0]!.context).toBeNull(); + expect(session.recentActivity[0]!.totalTokens).toBe(700); + }); + + it("tracks turn_completed events without usage", () => { + const session = createEmptyLiveSession(); + const event = createEvent("turn_completed"); + + applyCodexEventToSession(session, event); + + expect(session.recentActivity).toHaveLength(1); + expect(session.recentActivity[0]!.toolName).toBe("Turn completed"); + expect(session.recentActivity[0]!.context).toBeNull(); + }); + + it("tracks turn_failed events with token count", () => { + const session = createEmptyLiveSession(); + const event = createEvent("turn_failed", { + usage: { + inputTokens: 100, + outputTokens: 50, + totalTokens: 150, + }, + }); + + applyCodexEventToSession(session, event); + + expect(session.recentActivity).toHaveLength(1); + expect(session.recentActivity[0]!.toolName).toBe("Turn failed"); + expect(session.recentActivity[0]!.context).toBeNull(); + expect(session.recentActivity[0]!.totalTokens).toBe(150); + }); 
+ + it("tracks session_started events", () => { + const session = createEmptyLiveSession(); + const event = createEvent("session_started", { + sessionId: "s1", + threadId: "t1", + turnId: "turn-1", + }); + + applyCodexEventToSession(session, event); + + expect(session.recentActivity).toHaveLength(1); + expect(session.recentActivity[0]!.toolName).toBe("Session started"); + expect(session.recentActivity[0]!.context).toBeNull(); + }); + + it("tracks notification events with message as context", () => { + const session = createEmptyLiveSession(); + const event = createEvent("notification", { + message: "Downloading dependencies…", + }); + + applyCodexEventToSession(session, event); + + expect(session.recentActivity).toHaveLength(1); + expect(session.recentActivity[0]!.toolName).toBe("Notification"); + expect(session.recentActivity[0]!.context).toBe( + "Downloading dependencies…", + ); + }); + + it("truncates long notification messages", () => { + const session = createEmptyLiveSession(); + const longMessage = "A".repeat(120); + const event = createEvent("notification", { + message: longMessage, + }); + + applyCodexEventToSession(session, event); + + expect(session.recentActivity).toHaveLength(1); + expect(session.recentActivity[0]!.context).toBe(`${"A".repeat(80)}…`); + }); + + it("tracks notification events without message", () => { + const session = createEmptyLiveSession(); + const event = createEvent("notification"); + + applyCodexEventToSession(session, event); + + expect(session.recentActivity).toHaveLength(1); + expect(session.recentActivity[0]!.toolName).toBe("Notification"); + expect(session.recentActivity[0]!.context).toBeNull(); + }); + + it("still tracks approval_auto_approved events", () => { + const session = createEmptyLiveSession(); + const event = createEvent("approval_auto_approved", { + raw: { + params: { + toolName: "Read", + input: { file_path: "/tmp/foo/bar.ts" }, + }, + }, + }); + + applyCodexEventToSession(session, event); + + 
expect(session.recentActivity).toHaveLength(1); + expect(session.recentActivity[0]!.toolName).toBe("Read"); + expect(session.recentActivity[0]!.context).toBe("bar.ts"); + }); + + it("respects RECENT_ACTIVITY_MAX_SIZE of 10 across mixed events", () => { + const session = createEmptyLiveSession(); + + // Push 12 events of mixed types + for (let i = 0; i < 12; i++) { + const event = + i % 2 === 0 + ? createEvent("turn_completed", { + usage: { + inputTokens: i * 10, + outputTokens: i * 5, + totalTokens: i * 15, + }, + timestamp: `2026-03-06T10:00:${String(i).padStart(2, "0")}.000Z`, + }) + : createEvent("notification", { + message: `msg-${i}`, + timestamp: `2026-03-06T10:00:${String(i).padStart(2, "0")}.000Z`, + }); + applyCodexEventToSession(session, event); + } + + expect(session.recentActivity).toHaveLength(10); + // Oldest 2 should have been evicted — first entry timestamp should be index 2 + expect(session.recentActivity[0]!.timestamp).toBe( + "2026-03-06T10:00:02.000Z", + ); + }); + }); + + describe("no synthetic entries in activity feed", () => { + it("addPipelineActivity with stage_transition still works", () => { + const session = createEmptyLiveSession(); + addPipelineActivity(session, "stage_transition", "Stage → implement"); + expect(session.recentActivity).toHaveLength(1); + expect(session.recentActivity[0]!.toolName).toBe("stage_transition"); + }); + + it("activity feed does not contain session_start or state_change entries from orchestrator dispatch", () => { + // After removing synthetic entries from core.ts, a fresh session + // should have no session_start or state_change entries in recentActivity + const session = createEmptyLiveSession(); + // Simulate what the orchestrator now does (no addPipelineActivity calls for session_start/state_change) + expect( + session.recentActivity.filter( + (e) => + e.toolName === "session_start" || e.toolName === "state_change", + ), + ).toHaveLength(0); + }); + }); + + describe("unknown tool types show arguments", () 
=> { + it("extracts first string-valued argument for unknown tools", () => { + expect( + buildActivityContext("TodoWrite", { content: "Fix the bug" }), + ).toBe("Fix the bug"); + }); + + it("truncates long string arguments to 60 chars", () => { + const longValue = "A".repeat(80); + expect(buildActivityContext("WebSearch", { query: longValue })).toBe( + `${"A".repeat(60)}…`, + ); + }); + + it("returns null for unknown tools with no string-valued arguments", () => { + expect( + buildActivityContext("SomeTool", { count: 42, flag: true }), + ).toBeNull(); + }); + + it("skips empty/whitespace-only string arguments", () => { + expect( + buildActivityContext("SomeTool", { empty: "", second: "valid" }), + ).toBe("valid"); + }); + + it("picks first string argument when mixed types exist", () => { + expect( + buildActivityContext("SomeTool", { + num: 42, + name: "hello", + other: "world", + }), + ).toBe("hello"); + }); + }); + it("summarizes codex events for snapshot and log surfaces", () => { expect( summarizeCodexEvent( diff --git a/tests/observability/dashboard-render.test.ts b/tests/observability/dashboard-render.test.ts new file mode 100644 index 00000000..7f841d2f --- /dev/null +++ b/tests/observability/dashboard-render.test.ts @@ -0,0 +1,136 @@ +import { describe, expect, it } from "vitest"; + +import type { RuntimeSnapshot } from "../../src/logging/runtime-snapshot.js"; +import { renderDashboardHtml } from "../../src/observability/dashboard-render.js"; +import { getDisplayVersion } from "../../src/version.js"; + +const BASE_ROW: RuntimeSnapshot["running"][number] = { + issue_id: "issue-1", + issue_identifier: "SYMPH-47", + issue_title: "Test issue title", + state: "In Progress", + pipeline_stage: "implement", + activity_summary: "Working on it", + session_id: "session-abc", + turn_count: 3, + last_event: "notification", + last_message: "Working on it", + started_at: "2026-03-21T10:00:00.000Z", + first_dispatched_at: "2026-03-21T10:00:00.000Z", + last_event_at: 
"2026-03-21T10:01:00.000Z", + stage_duration_seconds: 60, + tokens_per_turn: 500, + tokens: { + input_tokens: 1000, + output_tokens: 500, + total_tokens: 1500, + cache_read_tokens: 200, + cache_write_tokens: 100, + reasoning_tokens: 50, + }, + total_pipeline_tokens: 1500, + execution_history: [], + turn_history: [], + recent_activity: [], + last_tool_call: null, + health: "green", + health_reason: null, +}; + +function buildSnapshot( + rowOverrides: Partial<RuntimeSnapshot["running"][number]>, +): RuntimeSnapshot { + return { + generated_at: "2026-03-21T10:05:30.000Z", + counts: { running: 1, retrying: 0, completed: 0, failed: 0 }, + running: [{ ...BASE_ROW, ...rowOverrides }], + retrying: [], + codex_totals: { + input_tokens: 1000, + output_tokens: 500, + total_tokens: 1500, + seconds_running: 330, + }, + rate_limits: {}, + }; +} + +describe("Dashboard Pipeline column", () => { + it("shows 'Pipeline' column header in the running table", () => { + const snapshot = buildSnapshot({}); + const html = renderDashboardHtml(snapshot, { liveUpdatesEnabled: false }); + expect(html).toContain("<th>Pipeline</th>"); + }); + + it("shows elapsed pipeline time for multi-stage issues (first_dispatched_at earlier than started_at)", () => { + // first_dispatched_at is 5m 30s before started_at + // generated_at is 2026-03-21T10:05:30.000Z + // first_dispatched_at is 2026-03-21T09:54:30.000Z → 11m 0s before generated_at + const snapshot = buildSnapshot({ + started_at: "2026-03-21T10:00:00.000Z", + first_dispatched_at: "2026-03-21T09:54:30.000Z", + }); + const html = renderDashboardHtml(snapshot, { liveUpdatesEnabled: false }); + // Pipeline time: from 09:54:30 to 10:05:30 = 11m 0s + expect(html).toContain("11m 0s"); + }); + + it("shows '—' in the Pipeline column for single-stage issues (first_dispatched_at equals started_at)", () => { + const snapshot = buildSnapshot({ + started_at: "2026-03-21T10:00:00.000Z", + first_dispatched_at: "2026-03-21T10:00:00.000Z", + }); + const html = 
renderDashboardHtml(snapshot, { liveUpdatesEnabled: false }); + // The Pipeline td should contain an em-dash (—) + // Use a regex to check the Pipeline column td contains — and no time string pattern near it + expect(html).toContain("—"); + // Verify: the Pipeline cell itself does NOT contain a "Xm Ys" pattern + // We do this by checking the generated HTML around the runtime column + // The runtime/turns column shows time since started_at; Pipeline should be — + const pipelineCellMatch = html.match( + /<td class="numeric">[^<]*<\/td>\s*<td class="numeric">([^<]*)<\/td>/, + ); + expect(pipelineCellMatch).not.toBeNull(); + const pipelineContent: string | undefined = pipelineCellMatch?.[1]; + // The second numeric cell (Pipeline) should be — + expect(pipelineContent?.trim()).toBe("—"); + }); + + it("includes formatPipelineTime in client-side JavaScript", () => { + const snapshot = buildSnapshot({}); + const html = renderDashboardHtml(snapshot, { liveUpdatesEnabled: true }); + expect(html).toContain("formatPipelineTime"); + }); + it("dashboard shows version in hero header", () => { + const snapshot = buildSnapshot({}); + const html = renderDashboardHtml(snapshot, { liveUpdatesEnabled: false }); + expect(html).toContain(getDisplayVersion()); + expect(html).toContain("Symphony Observability"); + }); + + it("activity column shows last_tool_call when present", () => { + const snapshot = buildSnapshot({ + last_tool_call: "Read model.ts", + activity_summary: "Working on it", + last_event: "notification", + }); + const html = renderDashboardHtml(snapshot, { liveUpdatesEnabled: false }); + expect(html).toContain("Read model.ts"); + }); + + it("activity column falls back to activity_summary when last_tool_call is null", () => { + const snapshot = buildSnapshot({ + last_tool_call: null, + activity_summary: "Working on it", + last_event: "notification", + }); + const html = renderDashboardHtml(snapshot, { liveUpdatesEnabled: false }); + expect(html).toContain("Working on it"); + 
}); + + it("client-side JavaScript references last_tool_call for activity text", () => { + const snapshot = buildSnapshot({}); + const html = renderDashboardHtml(snapshot, { liveUpdatesEnabled: true }); + expect(html).toContain("row.last_tool_call"); + }); +}); diff --git a/tests/observability/dashboard-server.test.ts b/tests/observability/dashboard-server.test.ts index 16d6965b..2e4c4f95 100644 --- a/tests/observability/dashboard-server.test.ts +++ b/tests/observability/dashboard-server.test.ts @@ -25,7 +25,7 @@ describe("dashboard server", () => { }); servers.push(server); - expect(server.hostname).toBe("127.0.0.1"); + expect(server.hostname).toBe("0.0.0.0"); expect(server.port).toBeGreaterThan(0); const dashboard = await sendRequest(server.port, { @@ -279,6 +279,8 @@ describe("dashboard server", () => { counts: { running: 2, retrying: 1, + completed: 0, + failed: 0, }, }; emitUpdate(); @@ -295,6 +297,177 @@ describe("dashboard server", () => { stream.close(); }); + it("renders expandable detail rows with toggle and detail panel for running sessions", async () => { + const server = await startDashboardServer({ + port: 0, + host: createHost(), + }); + servers.push(server); + + const dashboard = await sendRequest(server.port, { + method: "GET", + path: "/", + }); + expect(dashboard.statusCode).toBe(200); + expect(dashboard.body).toContain("expand-toggle"); + expect(dashboard.body).toContain("detail-row"); + expect(dashboard.body).toContain("detail-panel"); + expect(dashboard.body).toContain("detail-grid"); + expect(dashboard.body).toContain("Token breakdown"); + expect(dashboard.body).toContain("Recent activity"); + expect(dashboard.body).toContain("Execution history"); + expect(dashboard.body).toContain("aria-expanded"); + expect(dashboard.body).toContain("Cache read"); + expect(dashboard.body).toContain("Cache write"); + expect(dashboard.body).toContain("Reasoning"); + }); + + it("renders context section in detail panel with stage, activity summary, health 
reason, and rework count", async () => { + const baseRow = createSnapshot().running[0]!; + const snapshotWithContext: RuntimeSnapshot = { + ...createSnapshot(), + running: [ + { + ...baseRow, + pipeline_stage: "implement", + activity_summary: "Reviewing PR #42", + health: "yellow", + health_reason: "high token burn: 23,400 tokens/turn", + rework_count: 2, + }, + ], + }; + const server = await startDashboardServer({ + port: 0, + host: createHost({ + getRuntimeSnapshot: () => snapshotWithContext, + }), + }); + servers.push(server); + + const dashboard = await sendRequest(server.port, { + method: "GET", + path: "/", + }); + expect(dashboard.statusCode).toBe(200); + // Use class= attribute form since CSS also defines these class names + expect(dashboard.body).toContain('class="context-section"'); + expect(dashboard.body).toContain('class="stage-badge"'); + expect(dashboard.body).toContain("implement"); + expect(dashboard.body).toContain("Reviewing PR #42"); + expect(dashboard.body).toContain('class="context-health-yellow"'); + expect(dashboard.body).toContain("high token burn: 23,400 tokens/turn"); + expect(dashboard.body).toContain("state-badge-warning"); + expect(dashboard.body).toContain("Rework"); + // Context section (rendered element) appears before detail-grid in the HTML + const contextIdx = dashboard.body.indexOf('class="context-section"'); + const gridIdx = dashboard.body.indexOf('class="detail-grid"'); + expect(contextIdx).toBeGreaterThan(-1); + expect(gridIdx).toBeGreaterThan(-1); + expect(contextIdx).toBeLessThan(gridIdx); + }); + + it("omits context section when pipeline_stage, activity_summary, health_reason, and rework_count are all absent", async () => { + const baseRow = createSnapshot().running[0]!; + const snapshotNoContext: RuntimeSnapshot = { + ...createSnapshot(), + running: [ + { + ...baseRow, + pipeline_stage: null, + activity_summary: null, + health: "green", + health_reason: null, + }, + ], + }; + const server = await startDashboardServer({ + 
port: 0, + host: createHost({ + getRuntimeSnapshot: () => snapshotNoContext, + }), + }); + servers.push(server); + + const dashboard = await sendRequest(server.port, { + method: "GET", + path: "/", + }); + expect(dashboard.statusCode).toBe(200); + expect(dashboard.body).toContain("detail-panel"); + expect(dashboard.body).toContain("Token breakdown"); + // The rendered detail-row should not contain the context-section opening tag. + // The JS code embeds class="context-section" as a string literal, so we check + // only the server-rendered detail-row section (between detail-row and /tr). + const detailRowStart = dashboard.body.indexOf('class="detail-row"'); + const detailRowEnd = dashboard.body.indexOf("</tr>", detailRowStart); + expect(detailRowStart).toBeGreaterThan(-1); + const detailRowHtml = dashboard.body.slice(detailRowStart, detailRowEnd); + expect(detailRowHtml).not.toContain('class="context-section"'); + expect(detailRowHtml).toContain('class="detail-grid"'); + }); + + it("shows context-health-red for stalled (red health) agent in detail panel", async () => { + const baseRow = createSnapshot().running[0]!; + const snapshotRed: RuntimeSnapshot = { + ...createSnapshot(), + running: [ + { + ...baseRow, + pipeline_stage: "investigate", + activity_summary: null, + health: "red", + health_reason: "stalled: no activity for 145s", + }, + ], + }; + const server = await startDashboardServer({ + port: 0, + host: createHost({ + getRuntimeSnapshot: () => snapshotRed, + }), + }); + servers.push(server); + + const dashboard = await sendRequest(server.port, { + method: "GET", + path: "/", + }); + expect(dashboard.statusCode).toBe(200); + expect(dashboard.body).toContain("context-health-red"); + expect(dashboard.body).toContain("stalled: no activity for 145s"); + expect(dashboard.body).toContain("investigate"); + // The rendered context item uses context-health-red, not context-health-yellow + expect(dashboard.body).not.toContain('class="context-health-yellow"'); + }); + + 
it("renders an empty state for the running sessions table when there are no running sessions", async () => { + const emptySnapshot: RuntimeSnapshot = { + ...createSnapshot(), + counts: { running: 0, retrying: 0, completed: 0, failed: 0 }, + running: [], + retrying: [], + }; + const server = await startDashboardServer({ + port: 0, + host: createHost({ + getRuntimeSnapshot: () => emptySnapshot, + }), + }); + servers.push(server); + + const dashboard = await sendRequest(server.port, { + method: "GET", + path: "/", + }); + expect(dashboard.statusCode).toBe(200); + expect(dashboard.body).toContain("No active sessions"); + // Server-rendered running-rows tbody should show empty state, not session rows + expect(dashboard.body).toContain( + 'id="running-rows"><tr><td colspan="7"><p class="empty-state">No active sessions.</p></td></tr>', + ); + }); + it("returns a plain 404 for undefined routes", async () => { const server = await startDashboardServer({ port: 0, @@ -337,23 +510,41 @@ function createSnapshot(): RuntimeSnapshot { counts: { running: 1, retrying: 1, + completed: 0, + failed: 0, }, running: [ { issue_id: "issue-1", issue_identifier: "ABC-123", + issue_title: "ABC-123", state: "In Progress", + pipeline_stage: null, + activity_summary: "Working on tests", session_id: "thread-1-turn-3", turn_count: 3, last_event: "notification", last_message: "Working on tests", started_at: "2026-03-06T09:58:00.000Z", + first_dispatched_at: "2026-03-06T09:58:00.000Z", last_event_at: "2026-03-06T09:59:30.000Z", + stage_duration_seconds: 120, + tokens_per_turn: 667, tokens: { input_tokens: 1200, output_tokens: 800, total_tokens: 2000, + cache_read_tokens: 300, + cache_write_tokens: 150, + reasoning_tokens: 50, }, + total_pipeline_tokens: 2000, + execution_history: [], + turn_history: [], + recent_activity: [], + last_tool_call: null, + health: "green", + health_reason: null, }, ], retrying: [ diff --git a/tests/ops/token-report.test.ts b/tests/ops/token-report.test.ts new file mode 
100644 index 00000000..888e7f21 --- /dev/null +++ b/tests/ops/token-report.test.ts @@ -0,0 +1,1228 @@ +/** + * Tests for ops/token-report.mjs — SYMPH-129 + * + * These tests validate the core extraction pipeline by setting up temp + * directories that mimic $SYMPHONY_HOME and $SYMPHONY_LOG_DIR, writing + * synthetic symphony.jsonl events, then invoking the extract subcommand. + */ + +import { execFileSync } from "node:child_process"; +import { randomBytes } from "node:crypto"; +import { + existsSync, + mkdirSync, + readFileSync, + readdirSync, + rmSync, + statSync, + utimesSync, + writeFileSync, +} from "node:fs"; +import { tmpdir } from "node:os"; +import { dirname, join } from "node:path"; +import { fileURLToPath } from "node:url"; +import { afterEach, beforeEach, describe, expect, it } from "vitest"; + +const __dirname = dirname(fileURLToPath(import.meta.url)); +const SCRIPT_PATH = join(__dirname, "../../ops/token-report.mjs"); +const NODE_BIN = process.execPath; + +function tmpDir() { + const dir = join( + tmpdir(), + `token-report-test-${randomBytes(6).toString("hex")}`, + ); + mkdirSync(dir, { recursive: true }); + return dir; +} + +function runExtract( + symphonyHome: string, + logDir: string, + extraEnv: Record<string, string> = {}, +) { + const env = { + ...process.env, + SYMPHONY_HOME: symphonyHome, + SYMPHONY_LOG_DIR: logDir, + LINEAR_API_KEY: "", // Disable Linear for tests + ...extraEnv, + }; + return execFileSync(NODE_BIN, [SCRIPT_PATH, "extract"], { + env, + encoding: "utf-8", + stdio: ["pipe", "pipe", "pipe"], + timeout: 15000, + }); +} + +function runExtractWithStderr( + symphonyHome: string, + logDir: string, + extraEnv: Record<string, string> = {}, +) { + const env = { + ...process.env, + SYMPHONY_HOME: symphonyHome, + SYMPHONY_LOG_DIR: logDir, + LINEAR_API_KEY: "", // Disable Linear for tests + ...extraEnv, + }; + try { + const stdout = execFileSync(NODE_BIN, [SCRIPT_PATH, "extract"], { + env, + encoding: "utf-8", + timeout: 15000, + }); + 
return { stdout, stderr: "" }; + } catch (err: unknown) { + const e = err as { stdout?: string; stderr?: string }; + return { stdout: e.stdout || "", stderr: e.stderr || "" }; + } +} + +function makeStageEvent(overrides: Record<string, unknown> = {}) { + return JSON.stringify({ + timestamp: "2026-03-24T10:00:00.000Z", + level: "info", + event: "stage_completed", + message: "Stage completed.", + issue_id: "abc-123", + issue_identifier: "SYMPH-200", + session_id: "sess-1", + stage_name: "implement", + outcome: "completed", + input_tokens: 100, + output_tokens: 200, + total_tokens: 300, + total_input_tokens: 1000, + total_output_tokens: 2000, + total_total_tokens: 3000, + no_cache_tokens: 50, + total_cache_read_tokens: 400, + total_cache_write_tokens: 100, + cache_read_tokens: 40, + cache_write_tokens: 10, + reasoning_tokens: 0, + turns_used: 5, + turn_count: 5, + duration_ms: 60000, + ...overrides, + }); +} + +function readJsonlFile(path: string): Record<string, unknown>[] { + if (!existsSync(path)) return []; + return readFileSync(path, "utf-8") + .trim() + .split("\n") + .filter(Boolean) + .map((l) => JSON.parse(l)); +} + +describe("token-report.mjs extract", () => { + let symphonyHome: string; + let logDir: string; + + beforeEach(() => { + symphonyHome = tmpDir(); + logDir = tmpDir(); + }); + + afterEach(() => { + rmSync(symphonyHome, { recursive: true, force: true }); + rmSync(logDir, { recursive: true, force: true }); + }); + + it("extracts token history from fresh logs across 2 products", () => { + // Setup 2 product log dirs with events + for (const product of ["product-a", "product-b"]) { + const dir = join(logDir, product); + mkdirSync(dir, { recursive: true }); + const events = + product === "product-a" + ? 
[ + makeStageEvent({ stage_name: "plan" }), + makeStageEvent({ stage_name: "implement" }), + makeStageEvent({ stage_name: "review" }), + ] + : [ + makeStageEvent({ stage_name: "plan" }), + makeStageEvent({ stage_name: "implement" }), + ]; + writeFileSync(join(dir, "symphony.jsonl"), `${events.join("\n")}\n`); + } + + runExtract(symphonyHome, logDir); + + const historyPath = join(symphonyHome, "data", "token-history.jsonl"); + const records = readJsonlFile(historyPath); + expect(records).toHaveLength(5); + + // Product field derived from directory path + const productA = records.filter((r) => r.product === "product-a"); + const productB = records.filter((r) => r.product === "product-b"); + expect(productA).toHaveLength(3); + expect(productB).toHaveLength(2); + + // Config history should have 1 record + const configPath = join(symphonyHome, "data", "config-history.jsonl"); + const configs = readJsonlFile(configPath); + expect(configs).toHaveLength(1); + expect(configs[0]!.config_hashes).toBeDefined(); + + // HWM files should exist + const hwmDir = join(symphonyHome, "data", ".hwm"); + expect(existsSync(hwmDir)).toBe(true); + }); + + it("extracts both completed and failed stages", () => { + const dir = join(logDir, "myproduct"); + mkdirSync(dir, { recursive: true }); + const events = [ + makeStageEvent({ outcome: "completed", stage_name: "s1" }), + makeStageEvent({ outcome: "completed", stage_name: "s2" }), + makeStageEvent({ outcome: "completed", stage_name: "s3" }), + makeStageEvent({ outcome: "failed", stage_name: "s4" }), + makeStageEvent({ outcome: "failed", stage_name: "s5" }), + ]; + writeFileSync(join(dir, "symphony.jsonl"), `${events.join("\n")}\n`); + + runExtract(symphonyHome, logDir); + + const records = readJsonlFile( + join(symphonyHome, "data", "token-history.jsonl"), + ); + expect(records).toHaveLength(5); + const completed = records.filter((r) => r.outcome === "completed"); + const failed = records.filter((r) => r.outcome === "failed"); + 
expect(completed).toHaveLength(3); + expect(failed).toHaveLength(2); + }); + + it("idempotent re-extraction produces no duplicates", () => { + const dir = join(logDir, "prod"); + mkdirSync(dir, { recursive: true }); + writeFileSync( + join(dir, "symphony.jsonl"), + `${[makeStageEvent(), makeStageEvent({ stage_name: "review" })].join("\n")}\n`, + ); + + runExtract(symphonyHome, logDir); + const countBefore = readJsonlFile( + join(symphonyHome, "data", "token-history.jsonl"), + ).length; + + // Run again — no new events + runExtract(symphonyHome, logDir); + const countAfter = readJsonlFile( + join(symphonyHome, "data", "token-history.jsonl"), + ).length; + + expect(countAfter).toBe(countBefore); + + // Config history gains exactly 1 new snapshot + const configs = readJsonlFile( + join(symphonyHome, "data", "config-history.jsonl"), + ); + expect(configs).toHaveLength(2); + }); + + it("handles HWM recovery after file truncation", () => { + const dir = join(logDir, "prod"); + mkdirSync(dir, { recursive: true }); + const logPath = join(dir, "symphony.jsonl"); + + // Write 3 events and extract + writeFileSync( + logPath, + `${[makeStageEvent({ stage_name: "s1" }), makeStageEvent({ stage_name: "s2" }), makeStageEvent({ stage_name: "s3" })].join("\n")}\n`, + ); + runExtract(symphonyHome, logDir); + expect( + readJsonlFile(join(symphonyHome, "data", "token-history.jsonl")), + ).toHaveLength(3); + + // Truncate the file and write new event + writeFileSync(logPath, `${makeStageEvent({ stage_name: "s4" })}\n`); + + // Extract should detect truncation and re-read + runExtract(symphonyHome, logDir); + const records = readJsonlFile( + join(symphonyHome, "data", "token-history.jsonl"), + ); + expect(records).toHaveLength(4); + expect(records[3]!.stage_name).toBe("s4"); + }); + + it("discards partial line at EOF during active writing", () => { + const dir = join(logDir, "prod"); + mkdirSync(dir, { recursive: true }); + const logPath = join(dir, "symphony.jsonl"); + + // Write one 
complete event + one partial + const completeEvent = makeStageEvent({ stage_name: "complete" }); + writeFileSync(logPath, `${completeEvent}\n{"event":"stage_com`); + + runExtract(symphonyHome, logDir); + const records = readJsonlFile( + join(symphonyHome, "data", "token-history.jsonl"), + ); + expect(records).toHaveLength(1); + expect(records[0]!.stage_name).toBe("complete"); + + // Now complete the partial line and add a newline + writeFileSync( + logPath, + `${completeEvent}\n${makeStageEvent({ stage_name: "was-partial" })}\n`, + ); + + runExtract(symphonyHome, logDir); + const records2 = readJsonlFile( + join(symphonyHome, "data", "token-history.jsonl"), + ); + // Should pick up the now-completed line + expect(records2).toHaveLength(2); + expect(records2[1]!.stage_name).toBe("was-partial"); + }); + + it("skips malformed JSONL lines without failing", () => { + const dir = join(logDir, "prod"); + mkdirSync(dir, { recursive: true }); + const logPath = join(dir, "symphony.jsonl"); + + const lines = []; + for (let i = 0; i < 10; i++) { + lines.push(makeStageEvent({ stage_name: `s${i}` })); + } + // Insert 2 malformed lines + lines.splice(3, 0, "THIS IS NOT JSON"); + lines.splice(7, 0, "{broken json{{"); + + writeFileSync(logPath, `${lines.join("\n")}\n`); + + // Run with captured stderr + const env = { + ...process.env, + SYMPHONY_HOME: symphonyHome, + SYMPHONY_LOG_DIR: logDir, + LINEAR_API_KEY: "", + }; + try { + execFileSync(NODE_BIN, [SCRIPT_PATH, "extract"], { + env, + encoding: "utf-8", + timeout: 15000, + }); + } catch { + // extract logs warnings to stderr but shouldn't throw + } + + // Fallback: if the above didn't throw, read normally + const records = readJsonlFile( + join(symphonyHome, "data", "token-history.jsonl"), + ); + expect(records).toHaveLength(10); + }); + + it("handles empty log directory gracefully", () => { + const dir = join(logDir, "emptyproduct"); + mkdirSync(dir, { recursive: true }); + writeFileSync(join(dir, "symphony.jsonl"), ""); // 
Empty file + + runExtract(symphonyHome, logDir); + + const historyPath = join(symphonyHome, "data", "token-history.jsonl"); + if (existsSync(historyPath)) { + const content = readFileSync(historyPath, "utf-8").trim(); + if (content.length > 0) { + const records = content.split("\n").map((l) => JSON.parse(l)); + const emptyRecords = records.filter( + (r) => r.product === "emptyproduct", + ); + expect(emptyRecords).toHaveLength(0); + } + } + }); + + it("graceful degradation without Linear auth", () => { + const dir = join(logDir, "prod"); + mkdirSync(dir, { recursive: true }); + writeFileSync( + join(dir, "symphony.jsonl"), + `${makeStageEvent({ issue_identifier: "SYMPH-999" })}\n`, + ); + + runExtract(symphonyHome, logDir, { LINEAR_API_KEY: "" }); + + const records = readJsonlFile( + join(symphonyHome, "data", "token-history.jsonl"), + ); + expect(records).toHaveLength(1); + expect(records[0]!.issue_title).toBeNull(); + }); + + it("maps all required fields correctly", () => { + const dir = join(logDir, "myproduct"); + mkdirSync(dir, { recursive: true }); + writeFileSync( + join(dir, "symphony.jsonl"), + `${makeStageEvent({ + total_input_tokens: 5000, + total_output_tokens: 3000, + total_total_tokens: 8000, + no_cache_tokens: 1500, + total_cache_read_tokens: 2000, + total_cache_write_tokens: 500, + })}\n`, + ); + + runExtract(symphonyHome, logDir); + + const records = readJsonlFile( + join(symphonyHome, "data", "token-history.jsonl"), + ); + expect(records).toHaveLength(1); + const r = records[0]!; + expect(r.product).toBe("myproduct"); + expect(r.stage_name).toBe("implement"); + expect(r.total_input_tokens).toBe(5000); + expect(r.total_output_tokens).toBe(3000); + expect(r.total_total_tokens).toBe(8000); + expect(r.no_cache_tokens).toBe(1500); + expect(r.total_cache_read_tokens).toBe(2000); + expect(r.total_cache_write_tokens).toBe(500); + expect(r.outcome).toBe("completed"); + expect(r.extracted_at).toBeDefined(); + }); +}); + +// 
--------------------------------------------------------------------------- +// Analyze subcommand tests — SYMPH-130 +// --------------------------------------------------------------------------- + +function makeTokenRecord(overrides: Record<string, unknown> = {}) { + return { + timestamp: "2026-03-20T10:00:00.000Z", + product: "symphony", + issue_id: "abc-123", + issue_identifier: "SYMPH-200", + issue_title: "Some task", + session_id: "sess-1", + stage_name: "implement", + outcome: "completed", + total_input_tokens: 1000, + total_output_tokens: 2000, + total_total_tokens: 3000, + no_cache_tokens: 50, + total_cache_read_tokens: 400, + total_cache_write_tokens: 100, + input_tokens: 100, + output_tokens: 200, + total_tokens: 300, + cache_read_tokens: 40, + cache_write_tokens: 10, + reasoning_tokens: 0, + turns_used: 5, + duration_ms: 60000, + extracted_at: "2026-03-20T10:05:00.000Z", + ...overrides, + }; +} + +function makeConfigSnapshot(overrides: Record<string, unknown> = {}) { + return { + timestamp: "2026-03-20T10:00:00.000Z", + config_hashes: { "pipeline-config/review/SKILL.md": "abc123" }, + file_count: 1, + ...overrides, + }; +} + +function writeTokenHistory( + symphonyHome: string, + records: Record<string, unknown>[], +) { + const dataDir = join(symphonyHome, "data"); + mkdirSync(dataDir, { recursive: true }); + mkdirSync(join(dataDir, "linear-cache"), { recursive: true }); + const path = join(dataDir, "token-history.jsonl"); + writeFileSync(path, `${records.map((r) => JSON.stringify(r)).join("\n")}\n`); +} + +function writeConfigHistory( + symphonyHome: string, + records: Record<string, unknown>[], +) { + const dataDir = join(symphonyHome, "data"); + mkdirSync(dataDir, { recursive: true }); + const path = join(dataDir, "config-history.jsonl"); + writeFileSync(path, `${records.map((r) => JSON.stringify(r)).join("\n")}\n`); +} + +function runAnalyze( + symphonyHome: string, + extraEnv: Record<string, string> = {}, +) { + const env = { + ...process.env, + 
SYMPHONY_HOME: symphonyHome, + SYMPHONY_LOG_DIR: join(symphonyHome, "logs"), + LINEAR_API_KEY: "", // Disable Linear for tests + ...extraEnv, + }; + const stdout = execFileSync(NODE_BIN, [SCRIPT_PATH, "analyze"], { + env, + encoding: "utf-8", + stdio: ["pipe", "pipe", "pipe"], + timeout: 15000, + }); + return JSON.parse(stdout); +} + +/** + * Generate N days of token records spread across the date range. + */ +function generateDaysOfRecords( + days: number, + perDay: number, + baseOverrides: Record<string, unknown> = {}, +) { + const records: Record<string, unknown>[] = []; + const now = new Date(); + for (let d = 0; d < days; d++) { + const date = new Date(now); + date.setDate(date.getDate() - d); + for (let i = 0; i < perDay; i++) { + const ts = new Date(date); + ts.setHours(10 + i, 0, 0, 0); + records.push( + makeTokenRecord({ + timestamp: ts.toISOString(), + issue_identifier: `SYMPH-${200 + d}`, + issue_id: `id-${200 + d}`, + ...baseOverrides, + }), + ); + } + } + return records; +} + +describe("token-report.mjs analyze", () => { + let symphonyHome: string; + + beforeEach(() => { + symphonyHome = tmpDir(); + }); + + afterEach(() => { + rmSync(symphonyHome, { recursive: true, force: true }); + }); + + it("efficiency scorecard computation with 30+ days", () => { + const records = generateDaysOfRecords(35, 2); + writeTokenHistory(symphonyHome, records); + writeConfigHistory(symphonyHome, [makeConfigSnapshot()]); + + const result = runAnalyze(symphonyHome); + + // Check all scorecard fields exist + const sc = result.efficiency_scorecard; + expect(sc.cache_efficiency).toBeDefined(); + expect(sc.output_ratio).toBeDefined(); + expect(sc.wasted_context).toBeDefined(); + expect(sc.tokens_per_turn).toBeDefined(); + expect(sc.first_pass_rate).toBeDefined(); + expect(sc.failure_rate).toBeDefined(); + + // Each metric has current, trend_7d, trend_30d + expect(sc.cache_efficiency.current).toBeTypeOf("number"); + expect(sc.cache_efficiency.trend_7d).toBeTypeOf("number"); + 
expect(sc.cache_efficiency.trend_30d).toBeTypeOf("number"); + + // Verify cache_efficiency formula: cache_read / (input + cache_read) * 100 + // With defaults: 400 / (1000 + 400) * 100 = 28.6 (approx) + expect(sc.cache_efficiency.current).toBeCloseTo(28.6, 0); + + // Verify wasted_context formula: no_cache / input * 100 + // With defaults: 50 / 1000 * 100 = 5 + expect(sc.wasted_context.current).toBeCloseTo(5, 0); + }); + + it("failed stages excluded from efficiency but included in spend", () => { + const completed = Array.from({ length: 20 }, (_, i) => + makeTokenRecord({ + timestamp: new Date(Date.now() - i * 24 * 60 * 60 * 1000).toISOString(), + stage_name: "implement", + outcome: "completed", + issue_identifier: `SYMPH-${300 + i}`, + total_input_tokens: 1000, + total_cache_read_tokens: 400, + }), + ); + const failed = Array.from({ length: 5 }, (_, i) => + makeTokenRecord({ + timestamp: new Date( + Date.now() - (20 + i) * 24 * 60 * 60 * 1000, + ).toISOString(), + stage_name: "implement", + outcome: "failed", + issue_identifier: `SYMPH-${320 + i}`, + total_input_tokens: 500, + total_cache_read_tokens: 0, + }), + ); + writeTokenHistory(symphonyHome, [...completed, ...failed]); + writeConfigHistory(symphonyHome, [makeConfigSnapshot()]); + + const result = runAnalyze(symphonyHome); + + // Per-stage spend includes all 25 + expect(result.per_stage_spend.implement.count).toBe(25); + expect(result.per_stage_spend.implement.completed).toBe(20); + expect(result.per_stage_spend.implement.failed).toBe(5); + + // failure_rate for implement = 5/25 = 20% + expect(result.efficiency_scorecard.failure_rate.current.implement).toBe(20); + }); + + it("first-pass rate computation", () => { + // SYMPH-100: 1 implement completed (first-pass) + // SYMPH-101: 2 implement completed (rework) + // SYMPH-102: 1 implement completed (first-pass) + const records = [ + makeTokenRecord({ + issue_identifier: "SYMPH-100", + stage_name: "implement", + outcome: "completed", + timestamp: new 
Date(Date.now() - 1000).toISOString(), + }), + makeTokenRecord({ + issue_identifier: "SYMPH-101", + stage_name: "implement", + outcome: "completed", + timestamp: new Date(Date.now() - 2000).toISOString(), + }), + makeTokenRecord({ + issue_identifier: "SYMPH-101", + stage_name: "implement", + outcome: "completed", + timestamp: new Date(Date.now() - 3000).toISOString(), + }), + makeTokenRecord({ + issue_identifier: "SYMPH-102", + stage_name: "implement", + outcome: "completed", + timestamp: new Date(Date.now() - 4000).toISOString(), + }), + ]; + writeTokenHistory(symphonyHome, records); + writeConfigHistory(symphonyHome, [makeConfigSnapshot()]); + + const result = runAnalyze(symphonyHome); + + // first_pass_rate = 1 - (1/3) = 66.7% + const fpr = result.efficiency_scorecard.first_pass_rate.current; + expect(fpr).toBeGreaterThan(66); + expect(fpr).toBeLessThan(67); + }); + + it("per-stage utilization trend with config-change markers", () => { + const records = generateDaysOfRecords(35, 1, { + stage_name: "investigate", + }); + const moreRecords = generateDaysOfRecords(35, 1, { + stage_name: "implement", + }); + const reviewRecords = generateDaysOfRecords(35, 1, { + stage_name: "review", + }); + const mergeRecords = generateDaysOfRecords(35, 1, { + stage_name: "merge", + }); + writeTokenHistory(symphonyHome, [ + ...records, + ...moreRecords, + ...reviewRecords, + ...mergeRecords, + ]); + writeConfigHistory(symphonyHome, [ + makeConfigSnapshot({ + timestamp: new Date( + Date.now() - 10 * 24 * 60 * 60 * 1000, + ).toISOString(), + }), + makeConfigSnapshot({ + timestamp: new Date(Date.now() - 5 * 24 * 60 * 60 * 1000).toISOString(), + config_hashes: { "pipeline-config/review/SKILL.md": "changed123" }, + }), + ]); + + const result = runAnalyze(symphonyHome); + + // At least 4 stage types + const stageKeys = Object.keys(result.per_stage_trend); + expect(stageKeys.length).toBeGreaterThanOrEqual(4); + expect(stageKeys).toContain("investigate"); + 
expect(stageKeys).toContain("implement"); + expect(stageKeys).toContain("review"); + expect(stageKeys).toContain("merge"); + + // Config changes should be present + expect(result.per_stage_trend.implement.config_changes).toBeDefined(); + expect( + result.per_stage_trend.implement.config_changes.length, + ).toBeGreaterThanOrEqual(1); + }); + + it("per-ticket cost trend with median and mean", () => { + const records = generateDaysOfRecords(35, 2); + writeTokenHistory(symphonyHome, records); + writeConfigHistory(symphonyHome, [makeConfigSnapshot()]); + + const result = runAnalyze(symphonyHome); + + expect(result.per_ticket_trend.median).toBeDefined(); + expect(result.per_ticket_trend.mean).toBeDefined(); + expect(result.per_ticket_trend.ticket_count).toBeGreaterThan(0); + }); + + it("WoW delta computation with 14+ days", () => { + // Create records with different token counts for current vs prior week + // Use noon timestamps to avoid midnight boundary issues with daysAgo() + const records: Record<string, unknown>[] = []; + const now = new Date(); + + // Current week (days 1-6): 5000 tokens each — use midday timestamps + for (let d = 1; d <= 6; d++) { + const date = new Date(now); + date.setDate(date.getDate() - d); + date.setHours(12, 0, 0, 0); + records.push( + makeTokenRecord({ + timestamp: date.toISOString(), + total_total_tokens: 5000, + issue_identifier: `SYMPH-C${d}`, + }), + ); + } + + // Prior week (days 8-13): 4000 tokens each — use midday timestamps + for (let d = 8; d <= 13; d++) { + const date = new Date(now); + date.setDate(date.getDate() - d); + date.setHours(12, 0, 0, 0); + records.push( + makeTokenRecord({ + timestamp: date.toISOString(), + total_total_tokens: 4000, + issue_identifier: `SYMPH-P${d}`, + }), + ); + } + + // Add an anchor record 15 days ago to ensure span >= 14 + const anchor = new Date(now); + anchor.setDate(anchor.getDate() - 15); + anchor.setHours(12, 0, 0, 0); + records.push( + makeTokenRecord({ + timestamp: anchor.toISOString(), + 
total_total_tokens: 4000, + issue_identifier: "SYMPH-ANCHOR", + }), + ); + + writeTokenHistory(symphonyHome, records); + writeConfigHistory(symphonyHome, [makeConfigSnapshot()]); + + const result = runAnalyze(symphonyHome); + + // wow_delta_pct should exist and be non-null + expect( + result.executive_summary.total_tokens.wow_delta_pct, + ).not.toBeUndefined(); + expect(result.executive_summary.total_tokens.wow_delta_pct).not.toBeNull(); + // Current week: 6*5000 = 30000, Prior week: 6*4000 = 24000 + // WoW = (30000 - 24000) / 24000 * 100 = 25% + expect(result.executive_summary.total_tokens.wow_delta_pct).toBe(25); + }); + + it("per-product breakdown", () => { + const records = [ + ...generateDaysOfRecords(5, 1, { product: "symphony" }), + ...generateDaysOfRecords(5, 1, { product: "jony" }), + ...generateDaysOfRecords(5, 1, { product: "stickerlabs" }), + ]; + writeTokenHistory(symphonyHome, records); + writeConfigHistory(symphonyHome, [makeConfigSnapshot()]); + + const result = runAnalyze(symphonyHome); + + expect(Object.keys(result.per_product).length).toBe(3); + expect(result.per_product.symphony).toBeDefined(); + expect(result.per_product.jony).toBeDefined(); + expect(result.per_product.stickerlabs).toBeDefined(); + }); + + it("inflection detection returns array structure", () => { + // Generate 35 days with a spike pattern in the last 7 days + const records: Record<string, unknown>[] = []; + const now = new Date(); + + // Days 8-34: normal (3000 tokens) + for (let d = 8; d < 35; d++) { + const date = new Date(now); + date.setDate(date.getDate() - d); + records.push( + makeTokenRecord({ + timestamp: date.toISOString(), + stage_name: "implement", + total_total_tokens: 3000, + issue_identifier: `SYMPH-N${d}`, + }), + ); + } + + // Days 0-7: spike (6000 tokens — >15% above baseline) + for (let d = 0; d < 7; d++) { + const date = new Date(now); + date.setDate(date.getDate() - d); + records.push( + makeTokenRecord({ + timestamp: date.toISOString(), + stage_name: 
"implement", + total_total_tokens: 6000, + issue_identifier: `SYMPH-S${d}`, + }), + ); + } + + writeTokenHistory(symphonyHome, records); + writeConfigHistory(symphonyHome, [makeConfigSnapshot()]); + + const result = runAnalyze(symphonyHome); + + // inflections should be an array + expect(Array.isArray(result.inflections)).toBe(true); + // With the spike, we should detect an inflection + expect(result.inflections.length).toBeGreaterThanOrEqual(1); + if (result.inflections.length > 0) { + expect(result.inflections[0].attributions).toBeDefined(); + expect(Array.isArray(result.inflections[0].attributions)).toBe(true); + } + }); + + it("inflection detection with config-change correlation", () => { + const records: Record<string, unknown>[] = []; + const now = new Date(); + + // Days 8-34: normal (3000 tokens) for review stage + for (let d = 8; d < 35; d++) { + const date = new Date(now); + date.setDate(date.getDate() - d); + records.push( + makeTokenRecord({ + timestamp: date.toISOString(), + stage_name: "review", + total_total_tokens: 3000, + issue_identifier: `SYMPH-R${d}`, + }), + ); + } + + // Days 0-7: dropped (2000 tokens — drop >15%) + for (let d = 0; d < 7; d++) { + const date = new Date(now); + date.setDate(date.getDate() - d); + records.push( + makeTokenRecord({ + timestamp: date.toISOString(), + stage_name: "review", + total_total_tokens: 2000, + issue_identifier: `SYMPH-RD${d}`, + }), + ); + } + + writeTokenHistory(symphonyHome, records); + + // Config change 2 days before the 7d boundary + const d7 = new Date(now); + d7.setDate(d7.getDate() - 7); + const configChangeDate = new Date(d7); + configChangeDate.setDate(configChangeDate.getDate() - 1); + + writeConfigHistory(symphonyHome, [ + makeConfigSnapshot({ + timestamp: new Date( + now.getTime() - 30 * 24 * 60 * 60 * 1000, + ).toISOString(), + config_hashes: { "pipeline-config/review/SKILL.md": "oldhash" }, + }), + makeConfigSnapshot({ + timestamp: configChangeDate.toISOString(), + config_hashes: { 
"pipeline-config/review/SKILL.md": "newhash" }, + }), + ]); + + const result = runAnalyze(symphonyHome); + + expect(Array.isArray(result.inflections)).toBe(true); + // Should detect the decrease + if (result.inflections.length > 0) { + expect(result.inflections[0].attributions.length).toBeGreaterThanOrEqual( + 0, + ); + } + }); + + it("outlier detection with Linear hypothesis structure", () => { + const records: Record<string, unknown>[] = []; + const now = new Date(); + + // Normal issues: ~3000 tokens each + for (let i = 0; i < 20; i++) { + records.push( + makeTokenRecord({ + timestamp: new Date( + now.getTime() - i * 24 * 60 * 60 * 1000, + ).toISOString(), + issue_identifier: `SYMPH-${400 + i}`, + issue_id: `id-${400 + i}`, + total_total_tokens: 3000, + stage_name: "implement", + }), + ); + } + + // Outlier issue: 127000 tokens (way above 2σ) + records.push( + makeTokenRecord({ + timestamp: new Date( + now.getTime() - 1 * 24 * 60 * 60 * 1000, + ).toISOString(), + issue_identifier: "SYMPH-145", + issue_id: "id-145", + total_total_tokens: 127000, + stage_name: "implement", + }), + ); + + writeTokenHistory(symphonyHome, records); + writeConfigHistory(symphonyHome, [makeConfigSnapshot()]); + + const result = runAnalyze(symphonyHome); + + // Should detect outlier + expect(Array.isArray(result.outliers)).toBe(true); + expect(result.outliers.length).toBeGreaterThanOrEqual(1); + + const outlier = result.outliers.find( + (o: Record<string, unknown>) => o.issue_identifier === "SYMPH-145", + ); + expect(outlier).toBeDefined(); + expect(outlier.total_tokens).toBe(127000); + expect(outlier.z_score).toBeGreaterThan(2); + expect(outlier.hypothesis).toBeDefined(); + // Without LINEAR_API_KEY, parent is null + expect(outlier.parent).toBeNull(); + expect(outlier.hypothesis).toContain("unavailable"); + }); + + it("cold start with insufficient data (<7 days)", () => { + const records = generateDaysOfRecords(3, 2); + writeTokenHistory(symphonyHome, records); + 
writeConfigHistory(symphonyHome, [makeConfigSnapshot()]); + + const result = runAnalyze(symphonyHome); + + expect(result.cold_start).toBe(true); + expect(result.cold_start_tier).toBe("<7d"); + + // Raw daily numbers still included + expect(result.efficiency_scorecard).toBeDefined(); + expect(result.per_stage_spend).toBeDefined(); + + // Inflections and outliers labeled insufficient data + expect(result.inflections.status).toBe("insufficient data"); + expect(result.outliers.status).toBe("insufficient data"); + }); + + it("empty token history produces valid cold start output", () => { + // Just create the data dir without writing any records + mkdirSync(join(symphonyHome, "data", "linear-cache"), { recursive: true }); + + const result = runAnalyze(symphonyHome); + + expect(result.cold_start).toBe(true); + expect(result.efficiency_scorecard).toBeDefined(); + expect(result.executive_summary).toBeDefined(); + expect(result.per_product).toEqual({}); + expect(result.outliers).toEqual([]); + }); +}); + +// --------------------------------------------------------------------------- +// Render subcommand tests — SYMPH-131 +// --------------------------------------------------------------------------- + +function runRender( + symphonyHome: string, + extraEnv: Record<string, string> = {}, +) { + const env = { + ...process.env, + SYMPHONY_HOME: symphonyHome, + SYMPHONY_LOG_DIR: join(symphonyHome, "logs"), + LINEAR_API_KEY: "", + ...extraEnv, + }; + return execFileSync(NODE_BIN, [SCRIPT_PATH, "render"], { + env, + encoding: "utf-8", + stdio: ["pipe", "pipe", "pipe"], + timeout: 15000, + }); +} + +describe("token-report.mjs render", () => { + let symphonyHome: string; + + beforeEach(() => { + symphonyHome = tmpDir(); + }); + + afterEach(() => { + rmSync(symphonyHome, { recursive: true, force: true }); + }); + + it("generates self-contained HTML with all 8 sections", () => { + const records = generateDaysOfRecords(10, 3); + writeTokenHistory(symphonyHome, records); + 
writeConfigHistory(symphonyHome, [makeConfigSnapshot()]); + + runRender(symphonyHome); + + const today = new Date().toISOString().slice(0, 10); + const htmlPath = join(symphonyHome, "reports", `${today}.html`); + expect(existsSync(htmlPath)).toBe(true); + + const html = readFileSync(htmlPath, "utf-8"); + + // Self-contained: no external resources (no http:// or https:// in link/script/img tags — except Linear links) + const externalRefs = html.match( + /<(?:link|script|img)[^>]*(?:src|href)=["']https?:\/\//gi, + ); + expect(externalRefs).toBeNull(); + + // All 8 sections present + expect(html).toContain("Executive Summary"); + expect(html).toContain("Efficiency Scorecard"); + expect(html).toContain("Per-Stage Utilization Trend"); + expect(html).toContain("Per-Ticket Cost Trend"); + expect(html).toContain("Outlier Analysis"); + expect(html).toContain("Issue Leaderboard"); + expect(html).toContain("Stage Efficiency"); + expect(html).toContain("Per-Product Breakdown"); + + // Inline SVG elements present + expect(html).toContain("<svg"); + expect(html).toContain("<polyline"); + + // WCAG AA: dark theme styles present + expect(html).toContain("--bg: #0d1117"); + expect(html).toContain("--text: #c9d1d9"); + }); + + it("renders with empty data (cold start)", () => { + mkdirSync(join(symphonyHome, "data", "linear-cache"), { recursive: true }); + + runRender(symphonyHome); + + const today = new Date().toISOString().slice(0, 10); + const htmlPath = join(symphonyHome, "reports", `${today}.html`); + expect(existsSync(htmlPath)).toBe(true); + + const html = readFileSync(htmlPath, "utf-8"); + expect(html).toContain("Executive Summary"); + expect(html).toContain("<svg"); + }); +}); + +// --------------------------------------------------------------------------- +// Slack subcommand tests — SYMPH-131 +// --------------------------------------------------------------------------- + +function runSlack(symphonyHome: string, extraEnv: Record<string, string> = {}) { + const env = { + 
...process.env, + SYMPHONY_HOME: symphonyHome, + SYMPHONY_LOG_DIR: join(symphonyHome, "logs"), + LINEAR_API_KEY: "", + ...extraEnv, + }; + try { + const stdout = execFileSync(NODE_BIN, [SCRIPT_PATH, "slack"], { + env, + encoding: "utf-8", + timeout: 15000, + }); + return { stdout, stderr: "", exitCode: 0 }; + } catch (err: unknown) { + const e = err as { stdout?: string; stderr?: string; status?: number }; + return { + stdout: e.stdout || "", + stderr: e.stderr || "", + exitCode: e.status ?? 1, + }; + } +} + +describe("token-report.mjs slack", () => { + let symphonyHome: string; + + beforeEach(() => { + symphonyHome = tmpDir(); + }); + + afterEach(() => { + rmSync(symphonyHome, { recursive: true, force: true }); + }); + + it("graceful degradation when SLACK_WEBHOOK_URL not set", () => { + const records = generateDaysOfRecords(5, 2); + writeTokenHistory(symphonyHome, records); + writeConfigHistory(symphonyHome, [makeConfigSnapshot()]); + + const env: Record<string, string> = {}; + // Explicitly unset SLACK_WEBHOOK_URL + delete process.env.SLACK_WEBHOOK_URL; + const { exitCode } = runSlack(symphonyHome, env); + + expect(exitCode).toBe(0); + // stderr should contain warning (captured by parent process) + }); + + it("exits 0 when SLACK_WEBHOOK_URL is empty", () => { + const records = generateDaysOfRecords(5, 2); + writeTokenHistory(symphonyHome, records); + writeConfigHistory(symphonyHome, [makeConfigSnapshot()]); + + const { exitCode } = runSlack(symphonyHome, { SLACK_WEBHOOK_URL: "" }); + expect(exitCode).toBe(0); + }); +}); + +// --------------------------------------------------------------------------- +// Rotate subcommand tests — SYMPH-131 +// --------------------------------------------------------------------------- + +function runRotate( + symphonyHome: string, + extraEnv: Record<string, string> = {}, +) { + const env = { + ...process.env, + SYMPHONY_HOME: symphonyHome, + SYMPHONY_LOG_DIR: join(symphonyHome, "logs"), + LINEAR_API_KEY: "", + 
...extraEnv, + }; + return execFileSync(NODE_BIN, [SCRIPT_PATH, "rotate"], { + env, + encoding: "utf-8", + stdio: ["pipe", "pipe", "pipe"], + timeout: 15000, + }); +} + +describe("token-report.mjs rotate", () => { + let symphonyHome: string; + + beforeEach(() => { + symphonyHome = tmpDir(); + }); + + afterEach(() => { + rmSync(symphonyHome, { recursive: true, force: true }); + }); + + it("compresses JSONL files older than 7 days", () => { + const dataDir = join(symphonyHome, "data"); + mkdirSync(dataDir, { recursive: true }); + + const oldFile = join(dataDir, "old-log.jsonl"); + writeFileSync(oldFile, '{"test": true}\n'); + + // Set mtime to 10 days ago + const tenDaysAgo = new Date(Date.now() - 10 * 24 * 60 * 60 * 1000); + utimesSync(oldFile, tenDaysAgo, tenDaysAgo); + + runRotate(symphonyHome); + + // Original should be gone, compressed should exist + expect(existsSync(oldFile)).toBe(false); + expect(existsSync(`${oldFile}.gz`)).toBe(true); + }); + + it("deletes compressed files older than 14 days", () => { + const dataDir = join(symphonyHome, "data"); + mkdirSync(dataDir, { recursive: true }); + + const oldGz = join(dataDir, "ancient-log.jsonl.gz"); + writeFileSync(oldGz, "compressed-data"); + + // Set mtime to 20 days ago + const twentyDaysAgo = new Date(Date.now() - 20 * 24 * 60 * 60 * 1000); + utimesSync(oldGz, twentyDaysAgo, twentyDaysAgo); + + runRotate(symphonyHome); + + expect(existsSync(oldGz)).toBe(false); + }); + + it("does not touch files with mtime less than 2 hours", () => { + const dataDir = join(symphonyHome, "data"); + mkdirSync(dataDir, { recursive: true }); + + const recentFile = join(dataDir, "recent.jsonl"); + writeFileSync(recentFile, '{"test": true}\n'); + // File was just created, mtime < 2h + + runRotate(symphonyHome); + + // File should still exist and not be compressed + expect(existsSync(recentFile)).toBe(true); + expect(existsSync(`${recentFile}.gz`)).toBe(false); + }); + + it("deletes HTML reports older than 90 days", () => { + const 
reportsDir = join(symphonyHome, "reports"); + mkdirSync(reportsDir, { recursive: true }); + + const oldReport = join(reportsDir, "2025-01-01.html"); + writeFileSync(oldReport, "<html></html>"); + + // Set mtime to 100 days ago + const hundredDaysAgo = new Date(Date.now() - 100 * 24 * 60 * 60 * 1000); + utimesSync(oldReport, hundredDaysAgo, hundredDaysAgo); + + runRotate(symphonyHome); + + expect(existsSync(oldReport)).toBe(false); + }); + + it("preserves recent HTML reports", () => { + const reportsDir = join(symphonyHome, "reports"); + mkdirSync(reportsDir, { recursive: true }); + + const recentReport = join(reportsDir, "2026-03-20.html"); + writeFileSync(recentReport, "<html></html>"); + + runRotate(symphonyHome); + + expect(existsSync(recentReport)).toBe(true); + }); +}); diff --git a/tests/orchestrator/core.test.ts b/tests/orchestrator/core.test.ts index 9c119d6c..1433e39f 100644 --- a/tests/orchestrator/core.test.ts +++ b/tests/orchestrator/core.test.ts @@ -5,6 +5,7 @@ import type { Issue } from "../../src/domain/model.js"; import { OrchestratorCore, type OrchestratorCoreOptions, + classifyExitOutcome, computeFailureRetryDelayMs, sortIssuesForDispatch, } from "../../src/orchestrator/core.js"; @@ -65,6 +66,69 @@ describe("orchestrator core", () => { ).toBe(true); }); + it("rejects non-Todo issues with non-terminal blockers", () => { + const orchestrator = createOrchestrator(); + + expect( + orchestrator.isDispatchEligible( + createIssue({ + id: "ip-1", + identifier: "ISSUE-IP-1", + state: "In Progress", + blockedBy: [{ id: "b1", identifier: "B-1", state: "In Progress" }], + }), + ), + ).toBe(false); + + expect( + orchestrator.isDispatchEligible( + createIssue({ + id: "ip-2", + identifier: "ISSUE-IP-2", + state: "In Progress", + blockedBy: [{ id: "b2", identifier: "B-2", state: "Done" }], + }), + ), + ).toBe(true); + }); + + it("rejects Resume-state issues with non-terminal blockers", () => { + // Resume is an active state in some configurations — blockedBy 
check must + // apply to it just like Todo and In Progress (SYMPH-50). + const config = createConfig(); + config.tracker.activeStates = [ + "Todo", + "In Progress", + "In Review", + "Resume", + ]; + const orchestrator = createOrchestrator({ config }); + + // Blocked by a non-terminal issue → must NOT dispatch + expect( + orchestrator.isDispatchEligible( + createIssue({ + id: "resume-1", + identifier: "ISSUE-RESUME-1", + state: "Resume", + blockedBy: [{ id: "b1", identifier: "B-1", state: "In Progress" }], + }), + ), + ).toBe(false); + + // Blocked by a terminal issue → may dispatch + expect( + orchestrator.isDispatchEligible( + createIssue({ + id: "resume-2", + identifier: "ISSUE-RESUME-2", + state: "Resume", + blockedBy: [{ id: "b2", identifier: "B-2", state: "Done" }], + }), + ), + ).toBe(true); + }); + it("dispatches eligible issues on poll tick until slots are exhausted", async () => { const orchestrator = createOrchestrator({ tracker: createTracker({ @@ -282,6 +346,7 @@ describe("orchestrator core", () => { dueAtMs: Date.parse("2026-03-06T00:00:00.000Z"), timerHandle: null, error: "previous failure", + delayType: "failure", }; const result = await orchestrator.onRetryTimer("1"); @@ -331,186 +396,2975 @@ describe("orchestrator core", () => { reason: "stall_timeout", }); }); -}); -describe("orchestrator core integration flows", () => { - it("redispatches a retried issue through a fake runner boundary after an abnormal exit", async () => { - const harness = createIntegrationHarness(); + it("skips all dispatch when an open pipeline-halt issue exists", async () => { + const haltIssue = createIssue({ + id: "halt-1", + identifier: "SYMPH-123", + title: "Main branch build broken", + state: "In Progress", + labels: ["pipeline-halt"], + }); - const initialTick = await harness.orchestrator.pollTick(); + const regularIssues = [ + createIssue({ id: "1", identifier: "ISSUE-1", state: "Todo" }), + createIssue({ id: "2", identifier: "ISSUE-2", state: "Todo" }), + ]; - 
expect(initialTick.dispatchedIssueIds).toEqual(["1"]); - expect(harness.spawnCalls).toEqual([ - { - issueId: "1", - issueIdentifier: "ISSUE-1", - attempt: null, + const tracker: IssueTracker = { + async fetchCandidateIssues() { + return regularIssues; }, - ]); - expect([...harness.orchestrator.getState().claimed]).toEqual(["1"]); + async fetchIssuesByStates() { + return []; + }, + async fetchIssueStatesByIds() { + return []; + }, + async fetchIssuesByLabels(labelNames: string[]) { + if (labelNames.includes("pipeline-halt")) { + return [haltIssue]; + } + return []; + }, + }; - const retryEntry = harness.orchestrator.onWorkerExit({ - issueId: "1", - outcome: "abnormal", - reason: "turn failed", - }); + const orchestrator = createOrchestrator({ tracker }); + const result = await orchestrator.pollTick(); - expect(retryEntry).toMatchObject({ - issueId: "1", - attempt: 1, - error: "worker exited: turn failed", - }); - expect(harness.orchestrator.getState().running).toEqual({}); + expect(result.validation.ok).toBe(true); + expect(result.dispatchedIssueIds).toEqual([]); + expect(Object.keys(orchestrator.getState().running)).toEqual([]); + }); - const retryResult = await harness.orchestrator.onRetryTimer("1"); + it("dispatches normally when no pipeline-halt issue exists", async () => { + const regularIssues = [ + createIssue({ id: "1", identifier: "ISSUE-1", state: "Todo" }), + createIssue({ id: "2", identifier: "ISSUE-2", state: "Todo" }), + ]; - expect(retryResult).toEqual({ - dispatched: true, - released: false, - retryEntry: null, - }); - expect(harness.spawnCalls).toEqual([ - { - issueId: "1", - issueIdentifier: "ISSUE-1", - attempt: null, + const tracker: IssueTracker = { + async fetchCandidateIssues() { + return regularIssues; }, - { - issueId: "1", - issueIdentifier: "ISSUE-1", - attempt: 1, + async fetchIssuesByStates() { + return []; }, - ]); - expect(harness.orchestrator.getState().running["1"]?.retryAttempt).toBe(1); - 
expect([...harness.orchestrator.getState().claimed]).toEqual(["1"]); + async fetchIssueStatesByIds() { + return []; + }, + async fetchIssuesByLabels() { + return []; + }, + }; + + const orchestrator = createOrchestrator({ tracker }); + const result = await orchestrator.pollTick(); + + expect(result.validation.ok).toBe(true); + expect(result.dispatchedIssueIds).toEqual(["1", "2"]); + expect(Object.keys(orchestrator.getState().running)).toEqual(["1", "2"]); }); - it("requests terminal cleanup through the fake runner boundary and releases the claim once the issue disappears", async () => { - const harness = createIntegrationHarness(); + it("dispatches normally when pipeline-halt issue is in terminal state", async () => { + const closedHaltIssue = createIssue({ + id: "halt-1", + identifier: "SYMPH-123", + title: "Main branch build broken", + state: "Done", + labels: ["pipeline-halt"], + }); - await harness.orchestrator.pollTick(); - harness.setStateSnapshots([ - { id: "1", identifier: "ISSUE-1", state: "Done" }, - ]); + const regularIssues = [ + createIssue({ id: "1", identifier: "ISSUE-1", state: "Todo" }), + createIssue({ id: "2", identifier: "ISSUE-2", state: "Todo" }), + ]; - const reconcileTick = await harness.orchestrator.pollTick(); + const tracker: IssueTracker = { + async fetchCandidateIssues() { + return regularIssues; + }, + async fetchIssuesByStates() { + return []; + }, + async fetchIssueStatesByIds() { + return []; + }, + async fetchIssuesByLabels(labelNames: string[]) { + if (labelNames.includes("pipeline-halt")) { + return [closedHaltIssue]; + } + return []; + }, + }; - expect(reconcileTick.stopRequests).toEqual([ - { - issueId: "1", - issueIdentifier: "ISSUE-1", - cleanupWorkspace: true, - reason: "terminal_state", + const orchestrator = createOrchestrator({ tracker }); + const result = await orchestrator.pollTick(); + + expect(result.validation.ok).toBe(true); + expect(result.dispatchedIssueIds).toEqual(["1", "2"]); + 
expect(Object.keys(orchestrator.getState().running)).toEqual(["1", "2"]); + }); + + it("continues dispatch when fetchIssuesByLabels throws an error", async () => { + const regularIssues = [ + createIssue({ id: "1", identifier: "ISSUE-1", state: "Todo" }), + createIssue({ id: "2", identifier: "ISSUE-2", state: "Todo" }), + ]; + + const tracker: IssueTracker = { + async fetchCandidateIssues() { + return regularIssues; }, - ]); - expect(harness.stopCalls).toEqual([ - { - issueId: "1", - issueIdentifier: "ISSUE-1", - cleanupWorkspace: true, - reason: "terminal_state", + async fetchIssuesByStates() { + return []; }, - ]); + async fetchIssueStatesByIds() { + return []; + }, + async fetchIssuesByLabels() { + throw new Error("Linear API error"); + }, + }; - harness.orchestrator.onWorkerExit({ - issueId: "1", - outcome: "abnormal", - reason: "stopped after terminal reconciliation", - }); - harness.setCandidates([]); + const orchestrator = createOrchestrator({ tracker }); + const result = await orchestrator.pollTick(); - const retryResult = await harness.orchestrator.onRetryTimer("1"); + expect(result.validation.ok).toBe(true); + expect(result.dispatchedIssueIds).toEqual(["1", "2"]); + expect(Object.keys(orchestrator.getState().running)).toEqual(["1", "2"]); + }); - expect(retryResult).toEqual({ - dispatched: false, - released: true, - retryEntry: null, - }); - expect([...harness.orchestrator.getState().claimed]).toEqual([]); - expect(harness.orchestrator.getState().retryAttempts).toEqual({}); + it("dispatches normally when tracker does not implement fetchIssuesByLabels", async () => { + const regularIssues = [ + createIssue({ id: "1", identifier: "ISSUE-1", state: "Todo" }), + createIssue({ id: "2", identifier: "ISSUE-2", state: "Todo" }), + ]; + + const tracker: IssueTracker = { + async fetchCandidateIssues() { + return regularIssues; + }, + async fetchIssuesByStates() { + return []; + }, + async fetchIssueStatesByIds() { + return []; + }, + // Note: fetchIssuesByLabels is 
not implemented (optional) + }; + + const orchestrator = createOrchestrator({ tracker }); + const result = await orchestrator.pollTick(); + + expect(result.validation.ok).toBe(true); + expect(result.dispatchedIssueIds).toEqual(["1", "2"]); + expect(Object.keys(orchestrator.getState().running)).toEqual(["1", "2"]); }); + it("uses fetchOpenIssuesByLabels for halt check when available (P2: server-side filtering)", async () => { + let openIssuesByLabelsCalled = false; + let issuesByLabelsCalled = false; - it("stops a stalled worker through the fake runner boundary and releases it when the issue is no longer active", async () => { - const harness = createIntegrationHarness({ - now: "2026-03-06T00:10:00.000Z", - config: createConfig({ - codex: { stallTimeoutMs: 60_000 }, - }), - }); + const regularIssues = [ + createIssue({ id: "1", identifier: "ISSUE-1", state: "Todo" }), + ]; - await harness.orchestrator.pollTick(); - const runningEntry = harness.orchestrator.getState().running["1"]; - if (runningEntry === undefined) { - throw new Error("expected running entry for ISSUE-1"); - } - runningEntry.startedAt = "2026-03-06T00:00:00.000Z"; + const tracker: IssueTracker = { + async fetchCandidateIssues() { + return regularIssues; + }, + async fetchIssuesByStates() { + return []; + }, + async fetchIssueStatesByIds() { + return []; + }, + async fetchIssuesByLabels() { + issuesByLabelsCalled = true; + return []; + }, + async fetchOpenIssuesByLabels() { + openIssuesByLabelsCalled = true; + return []; + }, + }; - const reconcileTick = await harness.orchestrator.pollTick(); + const orchestrator = createOrchestrator({ tracker }); + await orchestrator.pollTick(); - expect(reconcileTick.stopRequests).toContainEqual({ - issueId: "1", - issueIdentifier: "ISSUE-1", - cleanupWorkspace: false, - reason: "stall_timeout", - }); - expect(harness.stopCalls).toContainEqual({ - issueId: "1", - issueIdentifier: "ISSUE-1", - cleanupWorkspace: false, - reason: "stall_timeout", - }); + 
expect(openIssuesByLabelsCalled).toBe(true); + expect(issuesByLabelsCalled).toBe(false); + }); - harness.orchestrator.onWorkerExit({ - issueId: "1", - outcome: "abnormal", - reason: "stalled", + it("falls back to fetchIssuesByLabels when fetchOpenIssuesByLabels throws", async () => { + const haltIssue = createIssue({ + id: "halt-1", + identifier: "SYMPH-123", + title: "Main branch build broken", + state: "In Progress", + labels: ["pipeline-halt"], }); - harness.setCandidates([ - createIssue({ - id: "1", - identifier: "ISSUE-1", - state: "Backlog", - }), - ]); - const retryResult = await harness.orchestrator.onRetryTimer("1"); + const regularIssues = [ + createIssue({ id: "1", identifier: "ISSUE-1", state: "Todo" }), + ]; - expect(retryResult).toEqual({ - dispatched: false, - released: true, - retryEntry: null, - }); - expect([...harness.orchestrator.getState().claimed]).toEqual([]); - expect(harness.orchestrator.getState().retryAttempts).toEqual({}); + const tracker: IssueTracker = { + async fetchCandidateIssues() { + return regularIssues; + }, + async fetchIssuesByStates() { + return []; + }, + async fetchIssueStatesByIds() { + return []; + }, + async fetchIssuesByLabels(labelNames: string[]) { + if (labelNames.includes("pipeline-halt")) { + return [haltIssue]; + } + return []; + }, + async fetchOpenIssuesByLabels() { + throw new Error("Linear API timeout"); + }, + }; + + const orchestrator = createOrchestrator({ tracker }); + const result = await orchestrator.pollTick(); + + // Should halt dispatch because the fallback found the halt issue + expect(result.dispatchedIssueIds).toEqual([]); + expect(Object.keys(orchestrator.getState().running)).toEqual([]); }); }); -function createOrchestrator(overrides?: { - config?: ResolvedWorkflowConfig; - tracker?: IssueTracker; - timerScheduler?: ReturnType<typeof createFakeTimerScheduler>; - stopRunningIssue?: OrchestratorCoreOptions["stopRunningIssue"]; - now?: () => Date; -}) { - const tracker = - overrides?.tracker ?? 
- createTracker({ - candidates: [createIssue({ id: "1", identifier: "ISSUE-1" })], - statesById: [{ id: "1", identifier: "ISSUE-1", state: "In Progress" }], +describe("retry timer pipeline-halt guard", () => { + it("skips dispatch and requeues retry at same attempt when pipeline is halted", async () => { + const haltIssue = createIssue({ + id: "halt-1", + identifier: "SYMPH-99", + title: "CI broken", + state: "In Progress", + labels: ["pipeline-halt"], }); - const options: OrchestratorCoreOptions = { - config: overrides?.config ?? createConfig(), - tracker, - spawnWorker: async () => ({ - workerHandle: { pid: 1001 }, - monitorHandle: { ref: "monitor-1" }, - }), - now: overrides?.now ?? (() => new Date("2026-03-06T00:00:05.000Z")), - }; - if (overrides?.stopRunningIssue !== undefined) { - options.stopRunningIssue = overrides.stopRunningIssue; - } + const timers = createFakeTimerScheduler(); + const tracker: IssueTracker = { + async fetchCandidateIssues() { + return [createIssue({ id: "1", identifier: "ISSUE-1" })]; + }, + async fetchIssuesByStates() { + return []; + }, + async fetchIssueStatesByIds() { + return []; + }, + async fetchOpenIssuesByLabels(labelNames: string[]) { + if (labelNames.includes("pipeline-halt")) { + return [haltIssue]; + } + return []; + }, + }; + + const spawnCalls: string[] = []; + const orchestrator = new OrchestratorCore({ + config: createConfig(), + tracker, + spawnWorker: async ({ issue }) => { + spawnCalls.push(issue.id); + return { + workerHandle: { pid: 1001 }, + monitorHandle: { ref: "monitor-1" }, + }; + }, + timerScheduler: timers, + now: () => new Date("2026-03-06T00:00:05.000Z"), + }); + + // Manually set up a retry entry at attempt 2 + orchestrator.getState().claimed.add("1"); + orchestrator.getState().retryAttempts["1"] = { + issueId: "1", + identifier: "ISSUE-1", + attempt: 2, + dueAtMs: Date.parse("2026-03-06T00:00:00.000Z"), + timerHandle: null, + error: "previous failure", + delayType: "failure", + }; + + const result = 
await orchestrator.onRetryTimer("1"); + + // Should NOT dispatch + expect(result.dispatched).toBe(false); + expect(result.released).toBe(false); + expect(spawnCalls).toEqual([]); + + // Should requeue at the SAME attempt (2), not increment to 3 + expect(result.retryEntry).not.toBeNull(); + expect(result.retryEntry).toMatchObject({ + issueId: "1", + attempt: 2, + identifier: "ISSUE-1", + error: "pipeline halted: SYMPH-99", + delayType: "failure", + }); + + // Claim should still be held + expect(orchestrator.getState().claimed.has("1")).toBe(true); + }); + + it("dispatches normally when halt check returns no open issues", async () => { + const timers = createFakeTimerScheduler(); + const tracker: IssueTracker = { + async fetchCandidateIssues() { + return [createIssue({ id: "1", identifier: "ISSUE-1" })]; + }, + async fetchIssuesByStates() { + return []; + }, + async fetchIssueStatesByIds() { + return [{ id: "1", identifier: "ISSUE-1", state: "In Progress" }]; + }, + async fetchOpenIssuesByLabels() { + return []; + }, + }; + + const spawnCalls: string[] = []; + const orchestrator = new OrchestratorCore({ + config: createConfig(), + tracker, + spawnWorker: async ({ issue }) => { + spawnCalls.push(issue.id); + return { + workerHandle: { pid: 1001 }, + monitorHandle: { ref: "monitor-1" }, + }; + }, + timerScheduler: timers, + now: () => new Date("2026-03-06T00:00:05.000Z"), + }); + + // Set up a retry entry + orchestrator.getState().claimed.add("1"); + orchestrator.getState().retryAttempts["1"] = { + issueId: "1", + identifier: "ISSUE-1", + attempt: 1, + dueAtMs: Date.parse("2026-03-06T00:00:00.000Z"), + timerHandle: null, + error: "previous failure", + delayType: "failure", + }; + + const result = await orchestrator.onRetryTimer("1"); + + expect(result.dispatched).toBe(true); + expect(result.released).toBe(false); + expect(spawnCalls).toEqual(["1"]); + }); + + it("continues dispatch when halt check throws (fail-open)", async () => { + const timers = 
createFakeTimerScheduler(); + const tracker: IssueTracker = { + async fetchCandidateIssues() { + return [createIssue({ id: "1", identifier: "ISSUE-1" })]; + }, + async fetchIssuesByStates() { + return []; + }, + async fetchIssueStatesByIds() { + return [{ id: "1", identifier: "ISSUE-1", state: "In Progress" }]; + }, + async fetchOpenIssuesByLabels() { + throw new Error("Linear API timeout"); + }, + }; + + const spawnCalls: string[] = []; + const orchestrator = new OrchestratorCore({ + config: createConfig(), + tracker, + spawnWorker: async ({ issue }) => { + spawnCalls.push(issue.id); + return { + workerHandle: { pid: 1001 }, + monitorHandle: { ref: "monitor-1" }, + }; + }, + timerScheduler: timers, + now: () => new Date("2026-03-06T00:00:05.000Z"), + }); + + // Set up a retry entry + orchestrator.getState().claimed.add("1"); + orchestrator.getState().retryAttempts["1"] = { + issueId: "1", + identifier: "ISSUE-1", + attempt: 1, + dueAtMs: Date.parse("2026-03-06T00:00:00.000Z"), + timerHandle: null, + error: "previous failure", + delayType: "failure", + }; + + const result = await orchestrator.onRetryTimer("1"); + + // Should proceed with dispatch despite halt check failure + expect(result.dispatched).toBe(true); + expect(spawnCalls).toEqual(["1"]); + }); + + it("falls back to fetchIssuesByLabels when fetchOpenIssuesByLabels throws", async () => { + const haltIssue = createIssue({ + id: "halt-1", + identifier: "SYMPH-99", + title: "CI broken", + state: "In Progress", + labels: ["pipeline-halt"], + }); + + const timers = createFakeTimerScheduler(); + const tracker: IssueTracker = { + async fetchCandidateIssues() { + return [createIssue({ id: "1", identifier: "ISSUE-1" })]; + }, + async fetchIssuesByStates() { + return []; + }, + async fetchIssueStatesByIds() { + return []; + }, + async fetchIssuesByLabels(labelNames: string[]) { + if (labelNames.includes("pipeline-halt")) { + return [haltIssue]; + } + return []; + }, + async fetchOpenIssuesByLabels() { + throw new 
Error("Linear API timeout"); + }, + }; + + const spawnCalls: string[] = []; + const orchestrator = new OrchestratorCore({ + config: createConfig(), + tracker, + spawnWorker: async ({ issue }) => { + spawnCalls.push(issue.id); + return { + workerHandle: { pid: 1001 }, + monitorHandle: { ref: "monitor-1" }, + }; + }, + timerScheduler: timers, + now: () => new Date("2026-03-06T00:00:05.000Z"), + }); + + orchestrator.getState().claimed.add("1"); + orchestrator.getState().retryAttempts["1"] = { + issueId: "1", + identifier: "ISSUE-1", + attempt: 2, + dueAtMs: Date.parse("2026-03-06T00:00:00.000Z"), + timerHandle: null, + error: "previous failure", + delayType: "failure", + }; + + const result = await orchestrator.onRetryTimer("1"); + + // Should halt because fallback found the halt issue + expect(result.dispatched).toBe(false); + expect(result.retryEntry).toMatchObject({ + attempt: 2, + error: "pipeline halted: SYMPH-99", + }); + expect(spawnCalls).toEqual([]); + }); + + it("falls back to fetchIssuesByLabels when fetchOpenIssuesByLabels is not available", async () => { + const haltIssue = createIssue({ + id: "halt-1", + identifier: "SYMPH-99", + title: "CI broken", + state: "In Progress", + labels: ["pipeline-halt"], + }); + + const timers = createFakeTimerScheduler(); + const tracker: IssueTracker = { + async fetchCandidateIssues() { + return [createIssue({ id: "1", identifier: "ISSUE-1" })]; + }, + async fetchIssuesByStates() { + return []; + }, + async fetchIssueStatesByIds() { + return []; + }, + // Only fetchIssuesByLabels, no fetchOpenIssuesByLabels + async fetchIssuesByLabels(labelNames: string[]) { + if (labelNames.includes("pipeline-halt")) { + return [haltIssue]; + } + return []; + }, + }; + + const spawnCalls: string[] = []; + const orchestrator = new OrchestratorCore({ + config: createConfig(), + tracker, + spawnWorker: async ({ issue }) => { + spawnCalls.push(issue.id); + return { + workerHandle: { pid: 1001 }, + monitorHandle: { ref: "monitor-1" }, + }; + 
}, + timerScheduler: timers, + now: () => new Date("2026-03-06T00:00:05.000Z"), + }); + + orchestrator.getState().claimed.add("1"); + orchestrator.getState().retryAttempts["1"] = { + issueId: "1", + identifier: "ISSUE-1", + attempt: 2, + dueAtMs: Date.parse("2026-03-06T00:00:00.000Z"), + timerHandle: null, + error: "previous failure", + delayType: "failure", + }; + + const result = await orchestrator.onRetryTimer("1"); + + expect(result.dispatched).toBe(false); + expect(result.retryEntry).toMatchObject({ + attempt: 2, + error: "pipeline halted: SYMPH-99", + }); + expect(spawnCalls).toEqual([]); + }); +}); + +describe("orchestrator core integration flows", () => { + it("redispatches a retried issue through a fake runner boundary after an abnormal exit", async () => { + const harness = createIntegrationHarness(); + + const initialTick = await harness.orchestrator.pollTick(); + + expect(initialTick.dispatchedIssueIds).toEqual(["1"]); + expect(harness.spawnCalls).toEqual([ + { + issueId: "1", + issueIdentifier: "ISSUE-1", + attempt: null, + }, + ]); + expect([...harness.orchestrator.getState().claimed]).toEqual(["1"]); + + const retryEntry = harness.orchestrator.onWorkerExit({ + issueId: "1", + outcome: "abnormal", + reason: "turn failed", + }); + + expect(retryEntry).toMatchObject({ + issueId: "1", + attempt: 1, + error: "worker exited: turn failed", + }); + expect(harness.orchestrator.getState().running).toEqual({}); + + const retryResult = await harness.orchestrator.onRetryTimer("1"); + + expect(retryResult).toEqual({ + dispatched: true, + released: false, + retryEntry: null, + }); + expect(harness.spawnCalls).toEqual([ + { + issueId: "1", + issueIdentifier: "ISSUE-1", + attempt: null, + }, + { + issueId: "1", + issueIdentifier: "ISSUE-1", + attempt: 1, + }, + ]); + expect(harness.orchestrator.getState().running["1"]?.retryAttempt).toBe(1); + expect([...harness.orchestrator.getState().claimed]).toEqual(["1"]); + }); + + it("requests terminal cleanup through the fake 
runner boundary and releases the claim once the issue disappears", async () => { + const harness = createIntegrationHarness(); + + await harness.orchestrator.pollTick(); + harness.setStateSnapshots([ + { id: "1", identifier: "ISSUE-1", state: "Done" }, + ]); + + const reconcileTick = await harness.orchestrator.pollTick(); + + expect(reconcileTick.stopRequests).toEqual([ + { + issueId: "1", + issueIdentifier: "ISSUE-1", + cleanupWorkspace: true, + reason: "terminal_state", + }, + ]); + expect(harness.stopCalls).toEqual([ + { + issueId: "1", + issueIdentifier: "ISSUE-1", + cleanupWorkspace: true, + reason: "terminal_state", + }, + ]); + + harness.orchestrator.onWorkerExit({ + issueId: "1", + outcome: "abnormal", + reason: "stopped after terminal reconciliation", + }); + harness.setCandidates([]); + + const retryResult = await harness.orchestrator.onRetryTimer("1"); + + expect(retryResult).toEqual({ + dispatched: false, + released: true, + retryEntry: null, + }); + expect([...harness.orchestrator.getState().claimed]).toEqual([]); + expect(harness.orchestrator.getState().retryAttempts).toEqual({}); + }); + + it("stops a stalled worker through the fake runner boundary and releases it when the issue is no longer active", async () => { + const harness = createIntegrationHarness({ + now: "2026-03-06T00:10:00.000Z", + config: createConfig({ + codex: { stallTimeoutMs: 60_000 }, + }), + }); + + await harness.orchestrator.pollTick(); + const runningEntry = harness.orchestrator.getState().running["1"]; + if (runningEntry === undefined) { + throw new Error("expected running entry for ISSUE-1"); + } + runningEntry.startedAt = "2026-03-06T00:00:00.000Z"; + + const reconcileTick = await harness.orchestrator.pollTick(); + + expect(reconcileTick.stopRequests).toContainEqual({ + issueId: "1", + issueIdentifier: "ISSUE-1", + cleanupWorkspace: false, + reason: "stall_timeout", + }); + expect(harness.stopCalls).toContainEqual({ + issueId: "1", + issueIdentifier: "ISSUE-1", + 
cleanupWorkspace: false, + reason: "stall_timeout", + }); + + harness.orchestrator.onWorkerExit({ + issueId: "1", + outcome: "abnormal", + reason: "stalled", + }); + harness.setCandidates([ + createIssue({ + id: "1", + identifier: "ISSUE-1", + state: "Backlog", + }), + ]); + + const retryResult = await harness.orchestrator.onRetryTimer("1"); + + expect(retryResult).toEqual({ + dispatched: false, + released: true, + retryEntry: null, + }); + expect([...harness.orchestrator.getState().claimed]).toEqual([]); + expect(harness.orchestrator.getState().retryAttempts).toEqual({}); + }); +}); + +describe("max retry safety net", () => { + it("retries normally when attempt is under the max limit", async () => { + const timers = createFakeTimerScheduler(); + const orchestrator = createOrchestrator({ + timerScheduler: timers, + config: createConfig({ agent: { maxRetryAttempts: 3 } }), + }); + + await orchestrator.pollTick(); + // Simulate abnormal exit — attempt will be 1 (under limit of 3) + const retryEntry = orchestrator.onWorkerExit({ + issueId: "1", + outcome: "abnormal", + reason: "turn failed", + }); + + expect(retryEntry).not.toBeNull(); + expect(retryEntry).toMatchObject({ + issueId: "1", + attempt: 1, + error: "worker exited: turn failed", + }); + expect(orchestrator.getState().completed.has("1")).toBe(false); + expect(orchestrator.getState().claimed.has("1")).toBe(true); + }); + + it("escalates when failure retry attempt exceeds the max limit", async () => { + const escalationComments: Array<{ issueId: string; body: string }> = []; + const escalationStates: Array<{ issueId: string; state: string }> = []; + const timers = createFakeTimerScheduler(); + + const orchestrator = new OrchestratorCore({ + config: createConfig({ + agent: { maxRetryAttempts: 2 }, + }), + tracker: createTracker({ + candidates: [createIssue({ id: "1", identifier: "ISSUE-1" })], + statesById: [{ id: "1", identifier: "ISSUE-1", state: "In Progress" }], + }), + spawnWorker: async () => ({ + 
workerHandle: { pid: 1001 }, + monitorHandle: { ref: "monitor-1" }, + }), + postComment: async (issueId, body) => { + escalationComments.push({ issueId, body }); + }, + updateIssueState: async (issueId, _identifier, state) => { + escalationStates.push({ issueId, state }); + }, + timerScheduler: timers, + now: () => new Date("2026-03-06T00:00:05.000Z"), + }); + + await orchestrator.pollTick(); + + // Simulate: attempt 1 (under limit of 2) + const retry1 = orchestrator.onWorkerExit({ + issueId: "1", + outcome: "abnormal", + reason: "turn failed", + }); + expect(retry1).not.toBeNull(); + expect(retry1).toMatchObject({ attempt: 1 }); + + // Fire retry timer → redispatch → exit again → attempt 2 (still at limit) + const retryResult = await orchestrator.onRetryTimer("1"); + expect(retryResult.dispatched).toBe(true); + + const retry2 = orchestrator.onWorkerExit({ + issueId: "1", + outcome: "abnormal", + reason: "turn failed again", + }); + expect(retry2).not.toBeNull(); + expect(retry2).toMatchObject({ attempt: 2 }); + + // Fire retry timer → redispatch → exit again → attempt 3 (exceeds limit of 2) + const retryResult2 = await orchestrator.onRetryTimer("1"); + expect(retryResult2.dispatched).toBe(true); + + const retry3 = orchestrator.onWorkerExit({ + issueId: "1", + outcome: "abnormal", + reason: "turn failed yet again", + }); + + // Should be null — escalated + expect(retry3).toBeNull(); + expect(orchestrator.getState().failed.has("1")).toBe(true); + expect(orchestrator.getState().claimed.has("1")).toBe(false); + expect(orchestrator.getState().retryAttempts).not.toHaveProperty("1"); + + // Verify escalation side effects were fired + expect(escalationComments).toHaveLength(1); + expect(escalationComments[0]?.body).toContain( + "Max retry attempts (2) exceeded", + ); + }); + + it("escalates on onRetryTimer failure retry when attempt exceeds limit", async () => { + const escalationComments: Array<{ issueId: string; body: string }> = []; + const timers = 
createFakeTimerScheduler(); + + const orchestrator = new OrchestratorCore({ + config: createConfig({ + agent: { maxConcurrentAgents: 0, maxRetryAttempts: 2 }, + }), + tracker: createTracker({ + candidates: [createIssue({ id: "1", identifier: "ISSUE-1" })], + statesById: [{ id: "1", identifier: "ISSUE-1", state: "In Progress" }], + }), + spawnWorker: async () => ({ + workerHandle: { pid: 1001 }, + monitorHandle: { ref: "monitor-1" }, + }), + postComment: async (issueId, body) => { + escalationComments.push({ issueId, body }); + }, + timerScheduler: timers, + now: () => new Date("2026-03-06T00:00:05.000Z"), + }); + + // Manually create a retry entry at attempt 2 (the limit) + orchestrator.getState().claimed.add("1"); + orchestrator.getState().retryAttempts["1"] = { + issueId: "1", + identifier: "ISSUE-1", + attempt: 2, + dueAtMs: Date.parse("2026-03-06T00:00:00.000Z"), + timerHandle: null, + error: "previous failure", + delayType: "failure", + }; + + // When onRetryTimer fires and slots are exhausted, it calls scheduleRetry + // with attempt 3, which exceeds maxRetryAttempts=2 + const result = await orchestrator.onRetryTimer("1"); + + expect(result.dispatched).toBe(false); + expect(result.retryEntry).toBeNull(); + expect(orchestrator.getState().failed.has("1")).toBe(true); + expect(orchestrator.getState().claimed.has("1")).toBe(false); + expect(escalationComments).toHaveLength(1); + expect(escalationComments[0]?.body).toContain( + "Max retry attempts (2) exceeded", + ); + }); + + it("does not count continuation retries against the max limit", async () => { + const timers = createFakeTimerScheduler(); + const orchestrator = createOrchestrator({ + timerScheduler: timers, + config: createConfig({ agent: { maxRetryAttempts: 1 } }), + }); + + await orchestrator.pollTick(); + + // Normal exit with no failure signal → continuation retry with attempt=1 + const retryEntry = orchestrator.onWorkerExit({ + issueId: "1", + outcome: "normal", + endedAt: new 
Date("2026-03-06T00:00:05.000Z"), + }); + + // Should still succeed even though maxRetryAttempts=1 + // because continuation retries don't count against the limit + expect(retryEntry).not.toBeNull(); + expect(retryEntry).toMatchObject({ + issueId: "1", + attempt: 1, + error: null, + }); + // After the fix for SYMPH-126, continuations no longer add to completed — + // only terminal completions do. + expect(orchestrator.getState().completed.has("1")).toBe(false); + expect(orchestrator.getState().claimed.has("1")).toBe(true); + }); + + it("respects the limit for verify failure signals", async () => { + const escalationComments: Array<{ issueId: string; body: string }> = []; + + const orchestrator = new OrchestratorCore({ + config: createConfig({ + agent: { maxRetryAttempts: 1 }, + }), + tracker: createTracker({ + candidates: [createIssue({ id: "1", identifier: "ISSUE-1" })], + statesById: [{ id: "1", identifier: "ISSUE-1", state: "In Progress" }], + }), + spawnWorker: async () => ({ + workerHandle: { pid: 1001 }, + monitorHandle: { ref: "monitor-1" }, + }), + postComment: async (issueId, body) => { + escalationComments.push({ issueId, body }); + }, + now: () => new Date("2026-03-06T00:00:05.000Z"), + }); + + await orchestrator.pollTick(); + + // First exit with verify failure → attempt 1 (at limit, still OK) + const retry1 = orchestrator.onWorkerExit({ + issueId: "1", + outcome: "normal", + agentMessage: "[STAGE_FAILED: verify]", + }); + expect(retry1).not.toBeNull(); + expect(retry1).toMatchObject({ attempt: 1 }); + + // Fire retry, redispatch, exit with verify failure again → attempt 2 (exceeds limit=1) + const retryResult = await orchestrator.onRetryTimer("1"); + expect(retryResult.dispatched).toBe(true); + + const retry2 = orchestrator.onWorkerExit({ + issueId: "1", + outcome: "normal", + agentMessage: "[STAGE_FAILED: verify]", + }); + + expect(retry2).toBeNull(); + expect(orchestrator.getState().failed.has("1")).toBe(true); + 
expect(orchestrator.getState().claimed.has("1")).toBe(false); + expect(escalationComments).toHaveLength(1); + expect(escalationComments[0]?.body).toContain( + "Max retry attempts (1) exceeded", + ); + }); + + it("respects the limit for infra failure signals", async () => { + const escalationComments: Array<{ issueId: string; body: string }> = []; + + const orchestrator = new OrchestratorCore({ + config: createConfig({ + agent: { maxRetryAttempts: 1 }, + }), + tracker: createTracker({ + candidates: [createIssue({ id: "1", identifier: "ISSUE-1" })], + statesById: [{ id: "1", identifier: "ISSUE-1", state: "In Progress" }], + }), + spawnWorker: async () => ({ + workerHandle: { pid: 1001 }, + monitorHandle: { ref: "monitor-1" }, + }), + postComment: async (issueId, body) => { + escalationComments.push({ issueId, body }); + }, + now: () => new Date("2026-03-06T00:00:05.000Z"), + }); + + await orchestrator.pollTick(); + + // First exit with infra failure → attempt 1 (at limit) + const retry1 = orchestrator.onWorkerExit({ + issueId: "1", + outcome: "normal", + agentMessage: "[STAGE_FAILED: infra]", + }); + expect(retry1).not.toBeNull(); + + const retryResult = await orchestrator.onRetryTimer("1"); + expect(retryResult.dispatched).toBe(true); + + // Second exit with infra failure → attempt 2 (exceeds limit=1) + const retry2 = orchestrator.onWorkerExit({ + issueId: "1", + outcome: "normal", + agentMessage: "[STAGE_FAILED: infra]", + }); + + expect(retry2).toBeNull(); + expect(orchestrator.getState().failed.has("1")).toBe(true); + expect(escalationComments).toHaveLength(1); + }); + + it("defaults maxRetryAttempts to 5 from config resolver", () => { + const config = createConfig(); + expect(config.agent.maxRetryAttempts).toBe(5); + }); +}); + +describe("completed issue resume guard", () => { + it("does NOT re-dispatch a completed issue still in 'In Review' state", () => { + const config = createConfig({ + agent: { maxConcurrentAgents: 2 }, + }); + // Include Resume and Blocked 
in active_states for this test + config.tracker.activeStates = [ + "Todo", + "In Progress", + "In Review", + "Blocked", + "Resume", + ]; + config.escalationState = "Blocked"; + + const orchestrator = createOrchestrator({ config }); + + // Mark issue as completed (simulates having finished the pipeline) + orchestrator.getState().completed.add("1"); + + // Issue is still "In Review" on the tracker — should NOT be re-dispatched + const eligible = orchestrator.isDispatchEligible( + createIssue({ id: "1", identifier: "ISSUE-1", state: "In Review" }), + ); + + expect(eligible).toBe(false); + // completed flag should NOT be cleared + expect(orchestrator.getState().completed.has("1")).toBe(true); + }); + + it("does NOT re-dispatch a completed issue still in 'In Progress' state", () => { + const config = createConfig({ + agent: { maxConcurrentAgents: 2 }, + }); + config.tracker.activeStates = [ + "Todo", + "In Progress", + "In Review", + "Blocked", + "Resume", + ]; + config.escalationState = "Blocked"; + + const orchestrator = createOrchestrator({ config }); + orchestrator.getState().completed.add("1"); + + const eligible = orchestrator.isDispatchEligible( + createIssue({ id: "1", identifier: "ISSUE-1", state: "In Progress" }), + ); + + expect(eligible).toBe(false); + expect(orchestrator.getState().completed.has("1")).toBe(true); + }); + + it("re-dispatches a completed issue moved to 'Resume' state", () => { + const config = createConfig({ + agent: { maxConcurrentAgents: 2 }, + }); + config.tracker.activeStates = [ + "Todo", + "In Progress", + "In Review", + "Blocked", + "Resume", + ]; + config.escalationState = "Blocked"; + + const orchestrator = createOrchestrator({ config }); + orchestrator.getState().completed.add("1"); + + const eligible = orchestrator.isDispatchEligible( + createIssue({ id: "1", identifier: "ISSUE-1", state: "Resume" }), + ); + + expect(eligible).toBe(true); + // completed flag should be cleared + 
expect(orchestrator.getState().completed.has("1")).toBe(false); + }); + + it("re-dispatches a completed issue moved to 'Todo' state", () => { + const config = createConfig({ + agent: { maxConcurrentAgents: 2 }, + }); + config.tracker.activeStates = [ + "Todo", + "In Progress", + "In Review", + "Blocked", + "Resume", + ]; + config.escalationState = "Blocked"; + + const orchestrator = createOrchestrator({ config }); + orchestrator.getState().completed.add("1"); + + const eligible = orchestrator.isDispatchEligible( + createIssue({ id: "1", identifier: "ISSUE-1", state: "Todo" }), + ); + + expect(eligible).toBe(true); + expect(orchestrator.getState().completed.has("1")).toBe(false); + }); + + it("skips terminal_state stop for worker in final active stage (merge → done)", async () => { + const config = createConfig(); + config.stages = { + initialStage: "investigate", + fastTrack: null, + stages: { + investigate: { + type: "agent", + runner: null, + model: null, + prompt: null, + maxTurns: null, + timeoutMs: null, + concurrency: null, + gateType: null, + maxRework: null, + reviewers: [], + transitions: { onComplete: "merge", onApprove: null, onRework: null }, + linearState: null, + }, + merge: { + type: "agent", + runner: null, + model: null, + prompt: null, + maxTurns: null, + timeoutMs: null, + concurrency: null, + gateType: null, + maxRework: null, + reviewers: [], + transitions: { onComplete: "done", onApprove: null, onRework: null }, + linearState: null, + }, + done: { + type: "terminal", + runner: null, + model: null, + prompt: null, + maxTurns: null, + timeoutMs: null, + concurrency: null, + gateType: null, + maxRework: null, + reviewers: [], + transitions: { onComplete: null, onApprove: null, onRework: null }, + linearState: "Done", + }, + }, + }; + const harness = createIntegrationHarness({ config }); + + // Dispatch the issue, which puts it in running state + await harness.orchestrator.pollTick(); + + // Simulate: worker is in the "merge" stage (final active 
stage before terminal "done") + harness.orchestrator.getState().issueStages["1"] = "merge"; + + // Issue transitions to Done (e.g., advanceStage fired updateIssueState) + harness.setStateSnapshots([ + { id: "1", identifier: "ISSUE-1", state: "Done" }, + ]); + + const result = await harness.orchestrator.pollTick(); + + // Worker should NOT be stopped — it's in the final active stage + expect(result.stopRequests).toEqual([]); + expect(harness.stopCalls).toEqual([]); + }); + + it("stops worker in non-final stage when issue reaches terminal state", async () => { + const config = createConfig(); + config.stages = { + initialStage: "investigate", + fastTrack: null, + stages: { + investigate: { + type: "agent", + runner: null, + model: null, + prompt: null, + maxTurns: null, + timeoutMs: null, + concurrency: null, + gateType: null, + maxRework: null, + reviewers: [], + transitions: { onComplete: "merge", onApprove: null, onRework: null }, + linearState: null, + }, + merge: { + type: "agent", + runner: null, + model: null, + prompt: null, + maxTurns: null, + timeoutMs: null, + concurrency: null, + gateType: null, + maxRework: null, + reviewers: [], + transitions: { onComplete: "done", onApprove: null, onRework: null }, + linearState: null, + }, + done: { + type: "terminal", + runner: null, + model: null, + prompt: null, + maxTurns: null, + timeoutMs: null, + concurrency: null, + gateType: null, + maxRework: null, + reviewers: [], + transitions: { onComplete: null, onApprove: null, onRework: null }, + linearState: "Done", + }, + }, + }; + const harness = createIntegrationHarness({ config }); + + // Dispatch the issue + await harness.orchestrator.pollTick(); + + // Worker is in "investigate" stage (NOT the final active stage) + harness.orchestrator.getState().issueStages["1"] = "investigate"; + + // Issue manually moved to Done by a human + harness.setStateSnapshots([ + { id: "1", identifier: "ISSUE-1", state: "Done" }, + ]); + + const result = await 
harness.orchestrator.pollTick(); + + // Worker SHOULD be stopped — investigate is not the final active stage + expect(result.stopRequests).toEqual([ + { + issueId: "1", + issueIdentifier: "ISSUE-1", + cleanupWorkspace: true, + reason: "terminal_state", + }, + ]); + }); + + it("does NOT re-dispatch a completed issue in escalation state ('Blocked')", () => { + const config = createConfig({ + agent: { maxConcurrentAgents: 2 }, + }); + config.tracker.activeStates = [ + "Todo", + "In Progress", + "In Review", + "Blocked", + "Resume", + ]; + config.escalationState = "Blocked"; + + const orchestrator = createOrchestrator({ config }); + orchestrator.getState().completed.add("1"); + + const eligible = orchestrator.isDispatchEligible( + createIssue({ id: "1", identifier: "ISSUE-1", state: "Blocked" }), + ); + + expect(eligible).toBe(false); + expect(orchestrator.getState().completed.has("1")).toBe(true); + }); +}); + +describe("execution history stage records", () => { + function createStageConfig() { + const config = createConfig(); + config.stages = { + initialStage: "investigate", + fastTrack: null, + stages: { + investigate: { + type: "agent", + runner: null, + model: null, + prompt: null, + maxTurns: null, + timeoutMs: null, + concurrency: null, + gateType: null, + maxRework: null, + reviewers: [], + transitions: { + onComplete: "implement", + onApprove: null, + onRework: null, + }, + linearState: null, + }, + implement: { + type: "agent", + runner: null, + model: null, + prompt: null, + maxTurns: null, + timeoutMs: null, + concurrency: null, + gateType: null, + maxRework: null, + reviewers: [], + transitions: { onComplete: null, onApprove: null, onRework: null }, + linearState: null, + }, + }, + }; + return config; + } + + it("stage record appended on worker exit", async () => { + const config = createStageConfig(); + const orchestrator = createOrchestrator({ config }); + + await orchestrator.pollTick(); + // Set the issue to the investigate stage + 
orchestrator.getState().issueStages["1"] = "investigate"; + + orchestrator.onWorkerExit({ + issueId: "1", + outcome: "normal", + endedAt: new Date("2026-03-06T00:00:10.000Z"), + }); + + const history = orchestrator.getState().issueExecutionHistory["1"]; + expect(history).toBeDefined(); + expect(history).toHaveLength(1); + }); + + it("stage record captures all fields", async () => { + const config = createStageConfig(); + const orchestrator = new OrchestratorCore({ + config, + tracker: createTracker({ + candidates: [createIssue({ id: "1", identifier: "ISSUE-1" })], + statesById: [{ id: "1", identifier: "ISSUE-1", state: "In Progress" }], + }), + spawnWorker: async () => ({ + workerHandle: { pid: 1001 }, + monitorHandle: { ref: "monitor-1" }, + }), + now: () => new Date("2026-03-06T00:00:05.000Z"), + }); + + await orchestrator.pollTick(); + orchestrator.getState().issueStages["1"] = "investigate"; + + // Apply codex event to give the running entry some token/turn data + orchestrator.onCodexEvent({ + issueId: "1", + event: { + event: "turn_completed", + timestamp: "2026-03-06T00:00:06.000Z", + codexAppServerPid: "1001", + sessionId: "s1", + threadId: "t1", + turnId: "turn-1", + usage: { inputTokens: 10, outputTokens: 5, totalTokens: 15 }, + rateLimits: {}, + message: "done", + }, + }); + + const startedAt = orchestrator.getState().running["1"]?.startedAt; + expect(startedAt).toBeDefined(); + + orchestrator.onWorkerExit({ + issueId: "1", + outcome: "normal", + endedAt: new Date("2026-03-06T00:01:05.000Z"), + }); + + const history = orchestrator.getState().issueExecutionHistory["1"]; + expect(history).toBeDefined(); + expect(history).toHaveLength(1); + const record = history![0]!; + expect(record.stageName).toBe("investigate"); + expect(record.durationMs).toBe(60_000); + expect(record.totalTokens).toBeGreaterThanOrEqual(0); + expect(typeof record.turns).toBe("number"); + expect(record.outcome).toBe("normal"); + }); + + it("StageRecord captures per-type tokens on stage 
completion", async () => { + const config = createStageConfig(); + const orchestrator = createOrchestrator({ + config, + now: () => new Date("2026-03-06T00:00:05.000Z"), + }); + + await orchestrator.pollTick(); + orchestrator.getState().issueStages["1"] = "investigate"; + + // Simulate turn_completed with 3000 input and 2000 output tokens + orchestrator.onCodexEvent({ + issueId: "1", + event: { + event: "turn_completed", + timestamp: "2026-03-06T00:00:06.000Z", + codexAppServerPid: "1001", + sessionId: "s1", + threadId: "t1", + turnId: "turn-1", + usage: { inputTokens: 3000, outputTokens: 2000, totalTokens: 5000 }, + rateLimits: {}, + message: "done", + }, + }); + + orchestrator.onWorkerExit({ + issueId: "1", + outcome: "normal", + endedAt: new Date("2026-03-06T00:01:05.000Z"), + }); + + const history = orchestrator.getState().issueExecutionHistory["1"]; + expect(history).toBeDefined(); + expect(history).toHaveLength(1); + const record = history![0]!; + expect(record.stageName).toBe("investigate"); + expect(record.inputTokens).toBe(3000); + expect(record.outputTokens).toBe(2000); + expect(record.totalTokens).toBe(5000); + }); + + it("accumulates records across multiple stages", async () => { + const config = createStageConfig(); + const orchestrator = createOrchestrator({ config }); + + // First stage: investigate + await orchestrator.pollTick(); + orchestrator.getState().issueStages["1"] = "investigate"; + + orchestrator.onWorkerExit({ + issueId: "1", + outcome: "normal", + endedAt: new Date("2026-03-06T00:01:00.000Z"), + }); + + // After normal exit, stage advances to "implement" + // issueExecutionHistory should have 1 record for "investigate" + const historyAfterFirst = + orchestrator.getState().issueExecutionHistory["1"]; + expect(historyAfterFirst).toHaveLength(1); + expect(historyAfterFirst![0]!.stageName).toBe("investigate"); + + // Second stage: implement + await orchestrator.onRetryTimer("1"); + orchestrator.getState().issueStages["1"] = "implement"; + + 
orchestrator.onWorkerExit({ + issueId: "1", + outcome: "abnormal", + endedAt: new Date("2026-03-06T00:02:00.000Z"), + }); + + // issueExecutionHistory should have 2 records + const historyAfterSecond = + orchestrator.getState().issueExecutionHistory["1"]; + expect(historyAfterSecond).toHaveLength(2); + expect(historyAfterSecond![1]!.stageName).toBe("implement"); + expect(historyAfterSecond![1]!.outcome).toBe("failed_to_start"); + }); + + it("does not append a stage record when no stage is set for the issue", async () => { + const orchestrator = createOrchestrator(); + + await orchestrator.pollTick(); + // No issueStages entry — no stage configured + + orchestrator.onWorkerExit({ + issueId: "1", + outcome: "normal", + endedAt: new Date("2026-03-06T00:01:00.000Z"), + }); + + // issueExecutionHistory should have no entry for this issue + expect(orchestrator.getState().issueExecutionHistory["1"]).toBeUndefined(); + }); +}); + +describe("execution report on terminal state", () => { + function createTerminalStageConfig() { + const config = createConfig(); + config.stages = { + initialStage: "investigate", + fastTrack: null, + stages: { + investigate: { + type: "agent", + runner: null, + model: null, + prompt: null, + maxTurns: null, + timeoutMs: null, + concurrency: null, + gateType: null, + maxRework: null, + reviewers: [], + transitions: { + onComplete: "merge", + onApprove: null, + onRework: null, + }, + linearState: null, + }, + merge: { + type: "agent", + runner: null, + model: null, + prompt: null, + maxTurns: null, + timeoutMs: null, + concurrency: null, + gateType: null, + maxRework: null, + reviewers: [], + transitions: { + onComplete: "done", + onApprove: null, + onRework: null, + }, + linearState: null, + }, + done: { + type: "terminal", + runner: null, + model: null, + prompt: null, + maxTurns: null, + timeoutMs: null, + concurrency: null, + gateType: null, + maxRework: null, + reviewers: [], + transitions: { onComplete: null, onApprove: null, onRework: null 
}, + linearState: "Done", + }, + }, + }; + return config; + } + + it("posts execution report on terminal state", async () => { + const postedComments: Array<{ issueId: string; body: string }> = []; + const config = createTerminalStageConfig(); + const orchestrator = new OrchestratorCore({ + config, + tracker: createTracker({ + candidates: [createIssue({ id: "1", identifier: "ISSUE-1" })], + statesById: [{ id: "1", identifier: "ISSUE-1", state: "In Progress" }], + }), + spawnWorker: async () => ({ + workerHandle: { pid: 1001 }, + monitorHandle: { ref: "monitor-1" }, + }), + postComment: async (issueId, body) => { + postedComments.push({ issueId, body }); + }, + now: () => new Date("2026-03-06T00:00:05.000Z"), + }); + + await orchestrator.pollTick(); + orchestrator.getState().issueStages["1"] = "merge"; + + orchestrator.onWorkerExit({ + issueId: "1", + outcome: "normal", + endedAt: new Date("2026-03-06T00:01:05.000Z"), + }); + + // Allow microtasks (void promise) to flush + await Promise.resolve(); + + expect(postedComments).toHaveLength(1); + expect(postedComments[0]?.body).toMatch(/^## Execution Report/); + }); + + it("execution report contains stage timeline", async () => { + const postedComments: Array<{ issueId: string; body: string }> = []; + const config = createTerminalStageConfig(); + const orchestrator = new OrchestratorCore({ + config, + tracker: createTracker({ + candidates: [createIssue({ id: "1", identifier: "ISSUE-1" })], + statesById: [{ id: "1", identifier: "ISSUE-1", state: "In Progress" }], + }), + spawnWorker: async () => ({ + workerHandle: { pid: 1001 }, + monitorHandle: { ref: "monitor-1" }, + }), + postComment: async (issueId, body) => { + postedComments.push({ issueId, body }); + }, + now: () => new Date("2026-03-06T00:00:05.000Z"), + }); + + await orchestrator.pollTick(); + // Manually inject history for investigate and merge stages + orchestrator.getState().issueExecutionHistory["1"] = [ + { + stageName: "investigate", + durationMs: 18_000, 
+ totalTokens: 50_000, + turns: 5, + outcome: "normal", + }, + ]; + orchestrator.getState().issueStages["1"] = "merge"; + + orchestrator.onWorkerExit({ + issueId: "1", + outcome: "normal", + endedAt: new Date("2026-03-06T00:01:05.000Z"), + }); + + await Promise.resolve(); + + expect(postedComments).toHaveLength(1); + const body = postedComments[0]!.body; + // Table columns + expect(body).toContain("| Stage |"); + expect(body).toContain("| Duration |"); + expect(body).toContain("| Tokens |"); + expect(body).toContain("| Turns |"); + expect(body).toContain("| Outcome |"); + // Stage rows + expect(body).toContain("investigate"); + expect(body).toContain("merge"); + }); + + it("execution report contains total tokens", async () => { + const postedComments: Array<{ issueId: string; body: string }> = []; + const config = createTerminalStageConfig(); + const orchestrator = new OrchestratorCore({ + config, + tracker: createTracker({ + candidates: [createIssue({ id: "1", identifier: "ISSUE-1" })], + statesById: [{ id: "1", identifier: "ISSUE-1", state: "In Progress" }], + }), + spawnWorker: async () => ({ + workerHandle: { pid: 1001 }, + monitorHandle: { ref: "monitor-1" }, + }), + postComment: async (issueId, body) => { + postedComments.push({ issueId, body }); + }, + now: () => new Date("2026-03-06T00:00:05.000Z"), + }); + + await orchestrator.pollTick(); + orchestrator.getState().issueExecutionHistory["1"] = [ + { + stageName: "investigate", + durationMs: 18_000, + totalTokens: 50_000, + turns: 5, + outcome: "normal", + }, + { + stageName: "implement", + durationMs: 120_000, + totalTokens: 200_000, + turns: 10, + outcome: "normal", + }, + { + stageName: "review", + durationMs: 45_000, + totalTokens: 80_000, + turns: 3, + outcome: "normal", + }, + ]; + orchestrator.getState().issueStages["1"] = "merge"; + + orchestrator.onWorkerExit({ + issueId: "1", + outcome: "normal", + endedAt: new Date("2026-03-06T00:01:05.000Z"), + }); + + await Promise.resolve(); + + 
expect(postedComments).toHaveLength(1); + const body = postedComments[0]!.body; + expect(body).toContain("Total tokens"); + // 50000 + 200000 + 80000 = 330000, plus merge stage tokens (0 in this test) + // The merge stage exit adds its record too + expect(body).toMatch(/Total tokens.*\d/); + }); + + it("execution report shows rework count", async () => { + const postedComments: Array<{ issueId: string; body: string }> = []; + const config = createTerminalStageConfig(); + const orchestrator = new OrchestratorCore({ + config, + tracker: createTracker({ + candidates: [createIssue({ id: "1", identifier: "ISSUE-1" })], + statesById: [{ id: "1", identifier: "ISSUE-1", state: "In Progress" }], + }), + spawnWorker: async () => ({ + workerHandle: { pid: 1001 }, + monitorHandle: { ref: "monitor-1" }, + }), + postComment: async (issueId, body) => { + postedComments.push({ issueId, body }); + }, + now: () => new Date("2026-03-06T00:00:05.000Z"), + }); + + await orchestrator.pollTick(); + orchestrator.getState().issueStages["1"] = "merge"; + orchestrator.getState().issueReworkCounts["1"] = 1; + + orchestrator.onWorkerExit({ + issueId: "1", + outcome: "normal", + endedAt: new Date("2026-03-06T00:01:05.000Z"), + }); + + await Promise.resolve(); + + expect(postedComments).toHaveLength(1); + const body = postedComments[0]!.body; + expect(body).toContain("Rework count"); + expect(body).toContain("1"); + }); + + it("execution report includes rework stages", async () => { + const postedComments: Array<{ issueId: string; body: string }> = []; + const config = createTerminalStageConfig(); + const orchestrator = new OrchestratorCore({ + config, + tracker: createTracker({ + candidates: [createIssue({ id: "1", identifier: "ISSUE-1" })], + statesById: [{ id: "1", identifier: "ISSUE-1", state: "In Progress" }], + }), + spawnWorker: async () => ({ + workerHandle: { pid: 1001 }, + monitorHandle: { ref: "monitor-1" }, + }), + postComment: async (issueId, body) => { + postedComments.push({ 
issueId, body }); + }, + now: () => new Date("2026-03-06T00:00:05.000Z"), + }); + + await orchestrator.pollTick(); + // Simulate: investigate, implement, review (fail), implement (rework), review (pass) + orchestrator.getState().issueExecutionHistory["1"] = [ + { + stageName: "investigate", + durationMs: 10_000, + totalTokens: 10_000, + turns: 3, + outcome: "normal", + }, + { + stageName: "implement", + durationMs: 60_000, + totalTokens: 80_000, + turns: 8, + outcome: "normal", + }, + { + stageName: "review", + durationMs: 20_000, + totalTokens: 30_000, + turns: 2, + outcome: "normal", + }, + { + stageName: "implement", + durationMs: 50_000, + totalTokens: 70_000, + turns: 7, + outcome: "normal", + }, + { + stageName: "review", + durationMs: 25_000, + totalTokens: 35_000, + turns: 2, + outcome: "normal", + }, + ]; + orchestrator.getState().issueStages["1"] = "merge"; + orchestrator.getState().issueReworkCounts["1"] = 1; + + orchestrator.onWorkerExit({ + issueId: "1", + outcome: "normal", + endedAt: new Date("2026-03-06T00:01:05.000Z"), + }); + + await Promise.resolve(); + + expect(postedComments).toHaveLength(1); + const body = postedComments[0]!.body; + // 5 pre-existing records + 1 merge record = 6 total stage rows + const tableRows = body + .split("\n") + .filter( + (line) => + line.startsWith("| ") && + !line.startsWith("| Stage") && + !line.startsWith("|----"), + ); + expect(tableRows).toHaveLength(6); + }); + + it("execution report failure does not block terminal transition", async () => { + const config = createTerminalStageConfig(); + const orchestrator = new OrchestratorCore({ + config, + tracker: createTracker({ + candidates: [createIssue({ id: "1", identifier: "ISSUE-1" })], + statesById: [{ id: "1", identifier: "ISSUE-1", state: "In Progress" }], + }), + spawnWorker: async () => ({ + workerHandle: { pid: 1001 }, + monitorHandle: { ref: "monitor-1" }, + }), + postComment: async (_issueId, _body) => { + throw new Error("postComment failed"); + }, + now: 
() => new Date("2026-03-06T00:00:05.000Z"), + }); + + await orchestrator.pollTick(); + orchestrator.getState().issueStages["1"] = "merge"; + + const retryEntry = orchestrator.onWorkerExit({ + issueId: "1", + outcome: "normal", + endedAt: new Date("2026-03-06T00:01:05.000Z"), + }); + + // Terminal transition: returns null (no retry), issue is completed + expect(retryEntry).toBeNull(); + expect(orchestrator.getState().completed.has("1")).toBe(true); + }); + + it("history cleaned up even if report posting fails", async () => { + const config = createTerminalStageConfig(); + const orchestrator = new OrchestratorCore({ + config, + tracker: createTracker({ + candidates: [createIssue({ id: "1", identifier: "ISSUE-1" })], + statesById: [{ id: "1", identifier: "ISSUE-1", state: "In Progress" }], + }), + spawnWorker: async () => ({ + workerHandle: { pid: 1001 }, + monitorHandle: { ref: "monitor-1" }, + }), + postComment: async (_issueId, _body) => { + throw new Error("postComment failed"); + }, + now: () => new Date("2026-03-06T00:00:05.000Z"), + }); + + await orchestrator.pollTick(); + orchestrator.getState().issueStages["1"] = "merge"; + orchestrator.getState().issueExecutionHistory["1"] = [ + { + stageName: "investigate", + durationMs: 10_000, + totalTokens: 10_000, + turns: 3, + outcome: "normal", + }, + ]; + + orchestrator.onWorkerExit({ + issueId: "1", + outcome: "normal", + endedAt: new Date("2026-03-06T00:01:05.000Z"), + }); + + // State should be cleaned up regardless of postComment failure + expect(orchestrator.getState().issueStages["1"]).toBeUndefined(); + expect(orchestrator.getState().issueReworkCounts["1"]).toBeUndefined(); + // History may contain the merge record from onWorkerExit, but after advanceStage it's deleted + expect(orchestrator.getState().issueExecutionHistory["1"]).toBeUndefined(); + }); + + it("no execution report without postComment", async () => { + // No postComment configured — just verify it completes normally without error + const config = 
createTerminalStageConfig(); + const orchestrator = new OrchestratorCore({ + config, + tracker: createTracker({ + candidates: [createIssue({ id: "1", identifier: "ISSUE-1" })], + statesById: [{ id: "1", identifier: "ISSUE-1", state: "In Progress" }], + }), + spawnWorker: async () => ({ + workerHandle: { pid: 1001 }, + monitorHandle: { ref: "monitor-1" }, + }), + // postComment intentionally not configured + now: () => new Date("2026-03-06T00:00:05.000Z"), + }); + + await orchestrator.pollTick(); + orchestrator.getState().issueStages["1"] = "merge"; + + const retryEntry = orchestrator.onWorkerExit({ + issueId: "1", + outcome: "normal", + endedAt: new Date("2026-03-06T00:01:05.000Z"), + }); + + // Issue completes normally + expect(retryEntry).toBeNull(); + expect(orchestrator.getState().completed.has("1")).toBe(true); + // No side effects + expect(orchestrator.getState().issueStages["1"]).toBeUndefined(); + }); + + it("execution history cleaned up after completion", async () => { + const postedComments: Array<{ issueId: string; body: string }> = []; + const config = createTerminalStageConfig(); + const orchestrator = new OrchestratorCore({ + config, + tracker: createTracker({ + candidates: [createIssue({ id: "1", identifier: "ISSUE-1" })], + statesById: [{ id: "1", identifier: "ISSUE-1", state: "In Progress" }], + }), + spawnWorker: async () => ({ + workerHandle: { pid: 1001 }, + monitorHandle: { ref: "monitor-1" }, + }), + postComment: async (issueId, body) => { + postedComments.push({ issueId, body }); + }, + now: () => new Date("2026-03-06T00:00:05.000Z"), + }); + + await orchestrator.pollTick(); + // Pre-populate execution history with 4 stages + orchestrator.getState().issueExecutionHistory["1"] = [ + { + stageName: "investigate", + durationMs: 18_000, + totalTokens: 50_000, + turns: 5, + outcome: "normal", + }, + { + stageName: "implement", + durationMs: 120_000, + totalTokens: 200_000, + turns: 10, + outcome: "normal", + }, + { + stageName: "review", + 
durationMs: 45_000, + totalTokens: 80_000, + turns: 3, + outcome: "normal", + }, + ]; + orchestrator.getState().issueStages["1"] = "merge"; + + orchestrator.onWorkerExit({ + issueId: "1", + outcome: "normal", + endedAt: new Date("2026-03-06T00:01:05.000Z"), + }); + + // Allow microtasks (void promise) to flush + await Promise.resolve(); + + // Execution history must be deleted from orchestrator state after Done + expect(orchestrator.getState().issueExecutionHistory["1"]).toBeUndefined(); + // Stages and rework counts also cleaned up + expect(orchestrator.getState().issueStages["1"]).toBeUndefined(); + expect(orchestrator.getState().issueReworkCounts["1"]).toBeUndefined(); + // Issue is marked completed + expect(orchestrator.getState().completed.has("1")).toBe(true); + // Report was still posted before cleanup + expect(postedComments).toHaveLength(1); + }); +}); + +describe("review findings comment on agent review failure", () => { + /** + * Build a stage config with: + * implement (agent) → review (agent, onRework: implement, maxRework: N) → done (terminal) + */ + function createReviewStageConfig(maxRework = 2) { + const config = createConfig(); + config.escalationState = "Blocked"; + config.tracker.activeStates = [ + "Todo", + "In Progress", + "In Review", + "Blocked", + ]; + config.stages = { + initialStage: "implement", + fastTrack: null, + stages: { + implement: { + type: "agent", + runner: null, + model: null, + prompt: null, + maxTurns: null, + timeoutMs: null, + concurrency: null, + gateType: null, + maxRework: null, + reviewers: [], + transitions: { + onComplete: "review", + onApprove: null, + onRework: null, + }, + linearState: null, + }, + review: { + type: "agent", + runner: null, + model: null, + prompt: null, + maxTurns: null, + timeoutMs: null, + concurrency: null, + gateType: null, + maxRework, + reviewers: [], + transitions: { + onComplete: "done", + onApprove: null, + onRework: "implement", + }, + linearState: null, + }, + done: { + type: 
"terminal", + runner: null, + model: null, + prompt: null, + maxTurns: null, + timeoutMs: null, + concurrency: null, + gateType: null, + maxRework: null, + reviewers: [], + transitions: { onComplete: null, onApprove: null, onRework: null }, + linearState: "Done", + }, + }, + }; + return config; + } + + it("posts review findings comment on agent review failure", async () => { + const postedComments: Array<{ issueId: string; body: string }> = []; + const config = createReviewStageConfig(); + const orchestrator = new OrchestratorCore({ + config, + tracker: createTracker({ + candidates: [createIssue({ id: "1", identifier: "ISSUE-1" })], + statesById: [{ id: "1", identifier: "ISSUE-1", state: "In Progress" }], + }), + spawnWorker: async () => ({ + workerHandle: { pid: 1001 }, + monitorHandle: { ref: "monitor-1" }, + }), + postComment: async (issueId, body) => { + postedComments.push({ issueId, body }); + }, + now: () => new Date("2026-03-06T00:00:05.000Z"), + }); + + await orchestrator.pollTick(); + orchestrator.getState().issueStages["1"] = "review"; + + orchestrator.onWorkerExit({ + issueId: "1", + outcome: "normal", + agentMessage: + "[STAGE_FAILED: review] Missing null check in handler.ts line 42", + }); + + // Flush microtasks so the void promise resolves + await Promise.resolve(); + + const reviewComment = postedComments.find((c) => + c.body.startsWith("## Review Findings"), + ); + expect(reviewComment).toBeDefined(); + expect(reviewComment?.issueId).toBe("1"); + }); + + it("review findings comment includes agent message", async () => { + const postedComments: Array<{ issueId: string; body: string }> = []; + const config = createReviewStageConfig(); + const orchestrator = new OrchestratorCore({ + config, + tracker: createTracker({ + candidates: [createIssue({ id: "1", identifier: "ISSUE-1" })], + statesById: [{ id: "1", identifier: "ISSUE-1", state: "In Progress" }], + }), + spawnWorker: async () => ({ + workerHandle: { pid: 1001 }, + monitorHandle: { ref: 
"monitor-1" }, + }), + postComment: async (issueId, body) => { + postedComments.push({ issueId, body }); + }, + now: () => new Date("2026-03-06T00:00:05.000Z"), + }); + + await orchestrator.pollTick(); + orchestrator.getState().issueStages["1"] = "review"; + + orchestrator.onWorkerExit({ + issueId: "1", + outcome: "normal", + agentMessage: + "[STAGE_FAILED: review] Missing null check in handler.ts line 42", + }); + + await Promise.resolve(); + + const reviewComment = postedComments.find((c) => + c.body.startsWith("## Review Findings"), + ); + expect(reviewComment?.body).toContain( + "Missing null check in handler.ts line 42", + ); + }); + + it("review failure triggers rework after posting comment", async () => { + const config = createReviewStageConfig(); + const orchestrator = new OrchestratorCore({ + config, + tracker: createTracker({ + candidates: [createIssue({ id: "1", identifier: "ISSUE-1" })], + statesById: [{ id: "1", identifier: "ISSUE-1", state: "In Progress" }], + }), + spawnWorker: async () => ({ + workerHandle: { pid: 1001 }, + monitorHandle: { ref: "monitor-1" }, + }), + now: () => new Date("2026-03-06T00:00:05.000Z"), + }); + + await orchestrator.pollTick(); + orchestrator.getState().issueStages["1"] = "review"; + + const retryEntry = orchestrator.onWorkerExit({ + issueId: "1", + outcome: "normal", + agentMessage: + "[STAGE_FAILED: review] Missing null check in handler.ts line 42", + }); + + // Should schedule a rework retry (continuation, not failure) + expect(retryEntry).not.toBeNull(); + expect(retryEntry?.error).toContain("rework to implement"); + // Stage should be updated to the rework target + expect(orchestrator.getState().issueStages["1"]).toBe("implement"); + }); + + it("review findings comment failure does not block rework", async () => { + const config = createReviewStageConfig(); + const orchestrator = new OrchestratorCore({ + config, + tracker: createTracker({ + candidates: [createIssue({ id: "1", identifier: "ISSUE-1" })], + 
statesById: [{ id: "1", identifier: "ISSUE-1", state: "In Progress" }], + }), + spawnWorker: async () => ({ + workerHandle: { pid: 1001 }, + monitorHandle: { ref: "monitor-1" }, + }), + postComment: async (_issueId, _body) => { + throw new Error("Comment service unavailable"); + }, + now: () => new Date("2026-03-06T00:00:05.000Z"), + }); + + await orchestrator.pollTick(); + orchestrator.getState().issueStages["1"] = "review"; + + const retryEntry = orchestrator.onWorkerExit({ + issueId: "1", + outcome: "normal", + agentMessage: "[STAGE_FAILED: review] Some failure", + }); + + // Rework must proceed despite postComment throwing + expect(retryEntry).not.toBeNull(); + expect(retryEntry?.error).toContain("rework to implement"); + }); + + it("postComment error is swallowed for review findings", async () => { + const config = createReviewStageConfig(); + const orchestrator = new OrchestratorCore({ + config, + tracker: createTracker({ + candidates: [createIssue({ id: "1", identifier: "ISSUE-1" })], + statesById: [{ id: "1", identifier: "ISSUE-1", state: "In Progress" }], + }), + spawnWorker: async () => ({ + workerHandle: { pid: 1001 }, + monitorHandle: { ref: "monitor-1" }, + }), + postComment: async (_issueId, _body) => { + throw new Error("Comment service unavailable"); + }, + now: () => new Date("2026-03-06T00:00:05.000Z"), + }); + + await orchestrator.pollTick(); + orchestrator.getState().issueStages["1"] = "review"; + + // Should not throw — error must be swallowed + let threw = false; + try { + orchestrator.onWorkerExit({ + issueId: "1", + outcome: "normal", + agentMessage: "[STAGE_FAILED: review] Some failure", + }); + // Allow microtasks to flush so the void promise rejects internally + await Promise.resolve(); + } catch { + threw = true; + } + + expect(threw).toBe(false); + }); + + it("skips review findings when postComment not configured", async () => { + const config = createReviewStageConfig(); + // No postComment wired — omit it entirely + const orchestrator 
= new OrchestratorCore({ + config, + tracker: createTracker({ + candidates: [createIssue({ id: "1", identifier: "ISSUE-1" })], + statesById: [{ id: "1", identifier: "ISSUE-1", state: "In Progress" }], + }), + spawnWorker: async () => ({ + workerHandle: { pid: 1001 }, + monitorHandle: { ref: "monitor-1" }, + }), + now: () => new Date("2026-03-06T00:00:05.000Z"), + }); + + await orchestrator.pollTick(); + orchestrator.getState().issueStages["1"] = "review"; + + const retryEntry = orchestrator.onWorkerExit({ + issueId: "1", + outcome: "normal", + agentMessage: "[STAGE_FAILED: review] Some failure", + }); + + // Rework still proceeds + expect(retryEntry).not.toBeNull(); + expect(retryEntry?.error).toContain("rework to implement"); + // No comment was posted (no postComment configured — no crash either) + expect(orchestrator.getState().issueStages["1"]).toBe("implement"); + }); + + it("escalation fires on max rework exceeded", async () => { + const escalationComments: Array<{ issueId: string; body: string }> = []; + const stateUpdates: Array<{ issueId: string; state: string }> = []; + const config = createReviewStageConfig(1); // maxRework=1 + const orchestrator = new OrchestratorCore({ + config, + tracker: createTracker({ + candidates: [createIssue({ id: "1", identifier: "ISSUE-1" })], + statesById: [{ id: "1", identifier: "ISSUE-1", state: "In Progress" }], + }), + spawnWorker: async () => ({ + workerHandle: { pid: 1001 }, + monitorHandle: { ref: "monitor-1" }, + }), + postComment: async (issueId, body) => { + escalationComments.push({ issueId, body }); + }, + updateIssueState: async (issueId, _issueIdentifier, stateName) => { + stateUpdates.push({ issueId, state: stateName }); + }, + now: () => new Date("2026-03-06T00:00:05.000Z"), + }); + + await orchestrator.pollTick(); + orchestrator.getState().issueStages["1"] = "review"; + // Already used 1 rework — next failure should trigger escalation + orchestrator.getState().issueReworkCounts["1"] = 1; + + const retryEntry 
= orchestrator.onWorkerExit({ + issueId: "1", + outcome: "normal", + agentMessage: "[STAGE_FAILED: review] Another null check failure", + }); + + await Promise.resolve(); + + // Escalation: issue is failed, no retry + expect(retryEntry).toBeNull(); + expect(orchestrator.getState().failed.has("1")).toBe(true); + + // Escalation side effects fire + expect(stateUpdates).toHaveLength(1); + expect(stateUpdates[0]?.state).toBe("Blocked"); + expect(escalationComments).toHaveLength(1); + expect(escalationComments[0]?.body).toContain( + "max rework attempts exceeded", + ); + }); + + it("no review findings on escalation", async () => { + const postedComments: Array<{ issueId: string; body: string }> = []; + const config = createReviewStageConfig(1); // maxRework=1 + const orchestrator = new OrchestratorCore({ + config, + tracker: createTracker({ + candidates: [createIssue({ id: "1", identifier: "ISSUE-1" })], + statesById: [{ id: "1", identifier: "ISSUE-1", state: "In Progress" }], + }), + spawnWorker: async () => ({ + workerHandle: { pid: 1001 }, + monitorHandle: { ref: "monitor-1" }, + }), + postComment: async (issueId, body) => { + postedComments.push({ issueId, body }); + }, + updateIssueState: async (_issueId, _identifier, _state) => { + // no-op + }, + now: () => new Date("2026-03-06T00:00:05.000Z"), + }); + + await orchestrator.pollTick(); + orchestrator.getState().issueStages["1"] = "review"; + orchestrator.getState().issueReworkCounts["1"] = 1; + + orchestrator.onWorkerExit({ + issueId: "1", + outcome: "normal", + agentMessage: "[STAGE_FAILED: review] Another null check failure", + }); + + await Promise.resolve(); + + // Only the escalation comment should have been posted — not a review findings comment + const reviewFindings = postedComments.filter((c) => + c.body.startsWith("## Review Findings"), + ); + expect(reviewFindings).toHaveLength(0); + + // The escalation comment should be present + const escalation = postedComments.filter( + (c) => !c.body.startsWith("## 
Review Findings"), + ); + expect(escalation).toHaveLength(1); + expect(escalation[0]?.body).toContain("max rework attempts exceeded"); + }); +}); + +describe("auto-close parent", () => { + function createTerminalStageConfig() { + const config = createConfig(); + config.stages = { + initialStage: "implement", + fastTrack: null, + stages: { + implement: { + type: "agent", + runner: null, + model: null, + prompt: null, + maxTurns: null, + timeoutMs: null, + concurrency: null, + gateType: null, + maxRework: null, + reviewers: [], + transitions: { + onComplete: "done", + onApprove: null, + onRework: null, + }, + linearState: null, + }, + done: { + type: "terminal", + runner: null, + model: null, + prompt: null, + maxTurns: null, + timeoutMs: null, + concurrency: null, + gateType: null, + maxRework: null, + reviewers: [], + transitions: { onComplete: null, onApprove: null, onRework: null }, + linearState: "Done", + }, + }, + }; + return config; + } + + it("auto-close parent fires on terminal state transition", async () => { + const autoCloseCalls: Array<{ + issueId: string; + issueIdentifier: string; + }> = []; + const config = createTerminalStageConfig(); + const orchestrator = new OrchestratorCore({ + config, + tracker: createTracker({ + candidates: [createIssue({ id: "1", identifier: "SYMPH-1" })], + statesById: [{ id: "1", identifier: "SYMPH-1", state: "In Progress" }], + }), + spawnWorker: async () => ({ + workerHandle: { pid: 1001 }, + monitorHandle: { ref: "monitor-1" }, + }), + autoCloseParentIssue: async (issueId, issueIdentifier) => { + autoCloseCalls.push({ issueId, issueIdentifier }); + }, + now: () => new Date("2026-03-06T00:00:05.000Z"), + }); + + await orchestrator.pollTick(); + orchestrator.getState().issueStages["1"] = "implement"; + + orchestrator.onWorkerExit({ + issueId: "1", + outcome: "normal", + endedAt: new Date("2026-03-06T00:01:05.000Z"), + }); + + // Allow microtasks (void promise) to flush + await Promise.resolve(); + + 
expect(autoCloseCalls).toHaveLength(1); + expect(autoCloseCalls[0]).toEqual({ + issueId: "1", + issueIdentifier: "SYMPH-1", + }); + }); + + it("auto-close parent does not fire on non-terminal stage transitions", async () => { + const autoCloseCalls: Array<{ + issueId: string; + issueIdentifier: string; + }> = []; + const config = createConfig(); + config.stages = { + initialStage: "implement", + fastTrack: null, + stages: { + implement: { + type: "agent", + runner: null, + model: null, + prompt: null, + maxTurns: null, + timeoutMs: null, + concurrency: null, + gateType: null, + maxRework: null, + reviewers: [], + transitions: { + onComplete: "review", + onApprove: null, + onRework: null, + }, + linearState: null, + }, + review: { + type: "agent", + runner: null, + model: null, + prompt: null, + maxTurns: null, + timeoutMs: null, + concurrency: null, + gateType: null, + maxRework: null, + reviewers: [], + transitions: { + onComplete: "done", + onApprove: null, + onRework: null, + }, + linearState: null, + }, + done: { + type: "terminal", + runner: null, + model: null, + prompt: null, + maxTurns: null, + timeoutMs: null, + concurrency: null, + gateType: null, + maxRework: null, + reviewers: [], + transitions: { onComplete: null, onApprove: null, onRework: null }, + linearState: "Done", + }, + }, + }; + + const orchestrator = new OrchestratorCore({ + config, + tracker: createTracker({ + candidates: [createIssue({ id: "1", identifier: "SYMPH-1" })], + statesById: [{ id: "1", identifier: "SYMPH-1", state: "In Progress" }], + }), + spawnWorker: async () => ({ + workerHandle: { pid: 1001 }, + monitorHandle: { ref: "monitor-1" }, + }), + autoCloseParentIssue: async (issueId, issueIdentifier) => { + autoCloseCalls.push({ issueId, issueIdentifier }); + }, + now: () => new Date("2026-03-06T00:00:05.000Z"), + }); + + await orchestrator.pollTick(); + orchestrator.getState().issueStages["1"] = "implement"; + + orchestrator.onWorkerExit({ + issueId: "1", + outcome: "normal", + 
endedAt: new Date("2026-03-06T00:01:05.000Z"), + }); + + // Allow microtasks to flush + await Promise.resolve(); + + // Should not fire — this was a non-terminal transition (implement → review) + expect(autoCloseCalls).toHaveLength(0); + }); + + it("auto-close parent failure does not block terminal transition", async () => { + const updateStateCalls: Array<{ + issueId: string; + stateName: string; + }> = []; + const config = createTerminalStageConfig(); + const orchestrator = new OrchestratorCore({ + config, + tracker: createTracker({ + candidates: [createIssue({ id: "1", identifier: "SYMPH-1" })], + statesById: [{ id: "1", identifier: "SYMPH-1", state: "In Progress" }], + }), + spawnWorker: async () => ({ + workerHandle: { pid: 1001 }, + monitorHandle: { ref: "monitor-1" }, + }), + updateIssueState: async (issueId, _identifier, stateName) => { + updateStateCalls.push({ issueId, stateName }); + }, + autoCloseParentIssue: async () => { + throw new Error("Linear API unreachable"); + }, + now: () => new Date("2026-03-06T00:00:05.000Z"), + }); + + await orchestrator.pollTick(); + orchestrator.getState().issueStages["1"] = "implement"; + + orchestrator.onWorkerExit({ + issueId: "1", + outcome: "normal", + endedAt: new Date("2026-03-06T00:01:05.000Z"), + }); + + // Allow microtasks to flush + await Promise.resolve(); + + // The terminal state update should still have fired despite autoCloseParentIssue failure + expect(updateStateCalls).toHaveLength(1); + expect(updateStateCalls[0]).toEqual({ issueId: "1", stateName: "Done" }); + + // Issue should be completed (not blocked by the auto-close failure) + expect(orchestrator.getState().completed.has("1")).toBe(true); + }); + + it("auto-close parent is not called when callback is not provided", async () => { + const config = createTerminalStageConfig(); + const orchestrator = new OrchestratorCore({ + config, + tracker: createTracker({ + candidates: [createIssue({ id: "1", identifier: "SYMPH-1" })], + statesById: [{ id: "1", 
identifier: "SYMPH-1", state: "In Progress" }], + }), + spawnWorker: async () => ({ + workerHandle: { pid: 1001 }, + monitorHandle: { ref: "monitor-1" }, + }), + now: () => new Date("2026-03-06T00:00:05.000Z"), + }); + + await orchestrator.pollTick(); + orchestrator.getState().issueStages["1"] = "implement"; + + // Should not throw even without autoCloseParentIssue callback + orchestrator.onWorkerExit({ + issueId: "1", + outcome: "normal", + endedAt: new Date("2026-03-06T00:01:05.000Z"), + }); + + await Promise.resolve(); + + expect(orchestrator.getState().completed.has("1")).toBe(true); + }); +}); + +describe("fast-track label-based stage routing", () => { + function createFastTrackConfig( + overrides?: Partial<ResolvedWorkflowConfig>, + ): ResolvedWorkflowConfig { + return { + ...createConfig(), + stages: { + initialStage: "investigate", + fastTrack: { label: "trivial", initialStage: "implement" }, + stages: Object.freeze({ + investigate: { + type: "agent", + runner: null, + model: null, + prompt: null, + maxTurns: null, + timeoutMs: null, + concurrency: null, + gateType: null, + maxRework: null, + reviewers: [], + transitions: { + onComplete: "implement", + onApprove: null, + onRework: null, + }, + linearState: null, + }, + implement: { + type: "agent", + runner: null, + model: null, + prompt: null, + maxTurns: null, + timeoutMs: null, + concurrency: null, + gateType: null, + maxRework: null, + reviewers: [], + transitions: { + onComplete: "done", + onApprove: null, + onRework: null, + }, + linearState: null, + }, + done: { + type: "terminal", + runner: null, + model: null, + prompt: null, + maxTurns: null, + timeoutMs: null, + concurrency: null, + gateType: null, + maxRework: null, + reviewers: [], + transitions: { onComplete: null, onApprove: null, onRework: null }, + linearState: null, + }, + }), + }, + ...overrides, + }; + } + + it("fast-track: trivial-labeled issue starts at fast-track initial stage", async () => { + const spawnedStageNames: Array<string | 
null> = []; + const orchestrator = new OrchestratorCore({ + config: createFastTrackConfig(), + tracker: createTracker({ + candidates: [ + createIssue({ + id: "1", + identifier: "ISSUE-1", + state: "Todo", + labels: ["trivial"], + }), + ], + }), + spawnWorker: async ({ stageName }) => { + spawnedStageNames.push(stageName); + return { + workerHandle: { pid: 1001 }, + monitorHandle: { ref: "monitor-1" }, + }; + }, + now: () => new Date("2026-03-06T00:00:05.000Z"), + }); + + await orchestrator.pollTick(); + + expect(spawnedStageNames).toEqual(["implement"]); + expect(orchestrator.getState().issueStages["1"]).toBe("implement"); + }); + + it("fast-track: non-trivial issue follows normal pipeline (starts at investigate)", async () => { + const spawnedStageNames: Array<string | null> = []; + const orchestrator = new OrchestratorCore({ + config: createFastTrackConfig(), + tracker: createTracker({ + candidates: [ + createIssue({ + id: "1", + identifier: "ISSUE-1", + state: "Todo", + labels: [], + }), + ], + }), + spawnWorker: async ({ stageName }) => { + spawnedStageNames.push(stageName); + return { + workerHandle: { pid: 1001 }, + monitorHandle: { ref: "monitor-1" }, + }; + }, + now: () => new Date("2026-03-06T00:00:05.000Z"), + }); + + await orchestrator.pollTick(); + + expect(spawnedStageNames).toEqual(["investigate"]); + expect(orchestrator.getState().issueStages["1"]).toBe("investigate"); + }); + + it("fast-track: case-insensitive label matching (label already normalized to lowercase by linear-normalize.ts)", async () => { + // Labels are normalized to lowercase upstream — "trivial" in config matches "trivial" in issue + const spawnedStageNames: Array<string | null> = []; + const orchestrator = new OrchestratorCore({ + config: createFastTrackConfig(), + tracker: createTracker({ + candidates: [ + // label is already normalized to lowercase "trivial" (as linear-normalize.ts does) + createIssue({ + id: "1", + identifier: "ISSUE-1", + state: "Todo", + labels: ["trivial"], + 
}), + ], + }), + spawnWorker: async ({ stageName }) => { + spawnedStageNames.push(stageName); + return { + workerHandle: { pid: 1001 }, + monitorHandle: { ref: "monitor-1" }, + }; + }, + now: () => new Date("2026-03-06T00:00:05.000Z"), + }); + + await orchestrator.pollTick(); + + expect(spawnedStageNames).toEqual(["implement"]); + }); + + it("fast-track: issue with cached stage ignores fast-track and continues from cached stage", async () => { + const spawnedStageNames: Array<string | null> = []; + const orchestrator = new OrchestratorCore({ + config: createFastTrackConfig(), + tracker: createTracker({ + candidates: [ + createIssue({ + id: "1", + identifier: "ISSUE-1", + state: "Todo", + labels: ["trivial"], + }), + ], + }), + spawnWorker: async ({ stageName }) => { + spawnedStageNames.push(stageName); + return { + workerHandle: { pid: 1001 }, + monitorHandle: { ref: "monitor-1" }, + }; + }, + now: () => new Date("2026-03-06T00:00:05.000Z"), + }); + + // Pre-set a cached stage for this issue + orchestrator.getState().issueStages["1"] = "review" as unknown as string; + + // Manually add a "review" stage to handle the cached stage scenario + // (The orchestrator will use the cached "review" value — which is not in our test stage config + // so stage will be null, but stageName will be "review", proving cached stage takes priority) + const config = createFastTrackConfig(); + const orchestratorWithReview = new OrchestratorCore({ + config: { + ...config, + stages: config.stages + ? 
{ + ...config.stages, + stages: Object.freeze({ + ...config.stages.stages, + review: { + type: "agent" as const, + runner: null, + model: null, + prompt: null, + maxTurns: null, + timeoutMs: null, + concurrency: null, + gateType: null, + maxRework: null, + reviewers: [], + transitions: { + onComplete: "done", + onApprove: null, + onRework: null, + }, + linearState: null, + }, + }), + } + : null, + }, + tracker: createTracker({ + candidates: [ + createIssue({ + id: "1", + identifier: "ISSUE-1", + state: "Todo", + labels: ["trivial"], + }), + ], + }), + spawnWorker: async ({ stageName }) => { + spawnedStageNames.push(stageName); + return { + workerHandle: { pid: 1001 }, + monitorHandle: { ref: "monitor-1" }, + }; + }, + now: () => new Date("2026-03-06T00:00:05.000Z"), + }); + + // Pre-set the cached stage — fast-track should be ignored + orchestratorWithReview.getState().issueStages["1"] = "review"; + + await orchestratorWithReview.pollTick(); + + expect(spawnedStageNames).toEqual(["review"]); + expect(orchestratorWithReview.getState().issueStages["1"]).toBe("review"); + }); + + it("no fast-track: issue with trivial label uses default initialStage when no fast_track config", async () => { + const spawnedStageNames: Array<string | null> = []; + const configWithoutFastTrack = createFastTrackConfig(); + const orchestrator = new OrchestratorCore({ + config: { + ...configWithoutFastTrack, + stages: configWithoutFastTrack.stages + ? 
{ ...configWithoutFastTrack.stages, fastTrack: null } + : null, + }, + tracker: createTracker({ + candidates: [ + createIssue({ + id: "1", + identifier: "ISSUE-1", + state: "Todo", + labels: ["trivial"], + }), + ], + }), + spawnWorker: async ({ stageName }) => { + spawnedStageNames.push(stageName); + return { + workerHandle: { pid: 1001 }, + monitorHandle: { ref: "monitor-1" }, + }; + }, + now: () => new Date("2026-03-06T00:00:05.000Z"), + }); + + await orchestrator.pollTick(); + + expect(spawnedStageNames).toEqual(["investigate"]); + }); + + it("fast-track: logs activation message when fast-track is applied", async () => { + const logs: string[] = []; + const originalLog = console.log; + console.log = (...args: unknown[]) => { + logs.push(args.join(" ")); + }; + + try { + const orchestrator = new OrchestratorCore({ + config: createFastTrackConfig(), + tracker: createTracker({ + candidates: [ + createIssue({ + id: "1", + identifier: "ISSUE-1", + state: "Todo", + labels: ["trivial"], + }), + ], + }), + spawnWorker: async () => ({ + workerHandle: { pid: 1001 }, + monitorHandle: { ref: "monitor-1" }, + }), + now: () => new Date("2026-03-06T00:00:05.000Z"), + }); + + await orchestrator.pollTick(); + } finally { + console.log = originalLog; + } + + expect(logs).toContainEqual( + "[orchestrator] Fast-tracking ISSUE-1 to implement (label: trivial)", + ); + }); +}); + +function createOrchestrator(overrides?: { + config?: ResolvedWorkflowConfig; + tracker?: IssueTracker; + timerScheduler?: ReturnType<typeof createFakeTimerScheduler>; + stopRunningIssue?: OrchestratorCoreOptions["stopRunningIssue"]; + now?: () => Date; +}) { + const tracker = + overrides?.tracker ?? + createTracker({ + candidates: [createIssue({ id: "1", identifier: "ISSUE-1" })], + statesById: [{ id: "1", identifier: "ISSUE-1", state: "In Progress" }], + }); + const options: OrchestratorCoreOptions = { + config: overrides?.config ?? 
createConfig(), + tracker, + spawnWorker: async () => ({ + workerHandle: { pid: 1001 }, + monitorHandle: { ref: "monitor-1" }, + }), + now: overrides?.now ?? (() => new Date("2026-03-06T00:00:05.000Z")), + }; + + if (overrides?.stopRunningIssue !== undefined) { + options.stopRunningIssue = overrides.stopRunningIssue; + } if (overrides?.timerScheduler !== undefined) { options.timerScheduler = overrides.timerScheduler; @@ -570,6 +3424,7 @@ function createConfig(overrides?: { maxConcurrentAgents: 2, maxTurns: 5, maxRetryBackoffMs: 300_000, + maxRetryAttempts: 5, maxConcurrentAgentsByState: {}, ...overrides?.agent, }, @@ -585,12 +3440,19 @@ function createConfig(overrides?: { }, server: { port: null, + slackNotifyChannel: null, }, observability: { dashboardEnabled: true, refreshMs: 1_000, renderIntervalMs: 16, }, + runner: { + kind: "codex", + model: null, + }, + stages: null, + escalationState: null, }; } @@ -703,3 +3565,35 @@ function createIntegrationHarness(input?: { }, }; } + +describe("classifyExitOutcome", () => { + it("classifies abnormal exit with turnCount=0 as failed_to_start", () => { + expect(classifyExitOutcome("abnormal", 0, "some error")).toBe( + "failed_to_start", + ); + }); + + it("classifies abnormal exit with stall_timeout in reason as timed_out", () => { + expect( + classifyExitOutcome("abnormal", 5, "stopped after stall_timeout"), + ).toBe("timed_out"); + }); + + it("classifies abnormal exit without stall_timeout as error", () => { + expect(classifyExitOutcome("abnormal", 3, "some error message")).toBe( + "error", + ); + }); + + it("passes through normal outcome unchanged", () => { + expect(classifyExitOutcome("normal", 2, undefined)).toBe("normal"); + }); + + it("passes through already classified outcomes unchanged", () => { + expect(classifyExitOutcome("failed_to_start", 0, undefined)).toBe( + "failed_to_start", + ); + expect(classifyExitOutcome("timed_out", 3, undefined)).toBe("timed_out"); + expect(classifyExitOutcome("error", 1, 
undefined)).toBe("error"); + }); +}); diff --git a/tests/orchestrator/dispatch-tracking.test.ts b/tests/orchestrator/dispatch-tracking.test.ts new file mode 100644 index 00000000..3a6113a3 --- /dev/null +++ b/tests/orchestrator/dispatch-tracking.test.ts @@ -0,0 +1,290 @@ +import { describe, expect, it } from "vitest"; + +import type { + ResolvedWorkflowConfig, + StagesConfig, +} from "../../src/config/types.js"; +import type { Issue } from "../../src/domain/model.js"; +import { createInitialOrchestratorState } from "../../src/domain/model.js"; +import { formatEasternTimestamp } from "../../src/logging/format-timestamp.js"; +import { + OrchestratorCore, + type OrchestratorCoreOptions, +} from "../../src/orchestrator/core.js"; +import type { IssueTracker } from "../../src/tracker/tracker.js"; + +describe("issueFirstDispatchedAt tracking", () => { + it("createInitialOrchestratorState includes issueFirstDispatchedAt as empty object", () => { + const state = createInitialOrchestratorState({ + pollIntervalMs: 30_000, + maxConcurrentAgents: 2, + }); + expect(state.issueFirstDispatchedAt).toEqual({}); + }); + + it("first dispatch sets issueFirstDispatchedAt for that issue", async () => { + const dispatchTime = new Date("2026-03-06T00:00:05.000Z"); + const orchestrator = createOrchestrator({ + now: () => dispatchTime, + }); + + await orchestrator.pollTick(); + + expect(orchestrator.getState().issueFirstDispatchedAt["1"]).toBe( + formatEasternTimestamp(dispatchTime), + ); + }); + + it("subsequent dispatch preserves original issueFirstDispatchedAt", async () => { + const t1 = new Date("2026-03-06T00:00:05.000Z"); + const t2 = new Date("2026-03-06T00:01:00.000Z"); + let currentTime = t1; + + const orchestrator = createOrchestrator({ + stages: createTwoAgentStageConfig(), + now: () => currentTime, + }); + + // First dispatch at T1 + await orchestrator.pollTick(); + expect(orchestrator.getState().issueFirstDispatchedAt["1"]).toBe( + formatEasternTimestamp(t1), + ); + + // Worker 
exits, stage advances to "implement" + orchestrator.onWorkerExit({ issueId: "1", outcome: "normal" }); + expect(orchestrator.getState().issueStages["1"]).toBe("implement"); + + // Advance time to T2 before second dispatch + currentTime = t2; + await orchestrator.onRetryTimer("1"); + + // issueFirstDispatchedAt must still be T1, not T2 + expect(orchestrator.getState().issueFirstDispatchedAt["1"]).toBe( + formatEasternTimestamp(t1), + ); + }); + + it("terminal cleanup deletes issueFirstDispatchedAt", async () => { + const orchestrator = createOrchestrator({ + stages: createTerminalStageConfig(), + }); + + // Dispatch to "implement" stage — sets issueFirstDispatchedAt + await orchestrator.pollTick(); + expect(orchestrator.getState().issueFirstDispatchedAt["1"]).toBeDefined(); + + // Normal exit advances to "done" (terminal) — triggers cleanup + orchestrator.onWorkerExit({ issueId: "1", outcome: "normal" }); + + expect(orchestrator.getState().issueFirstDispatchedAt["1"]).toBeUndefined(); + expect(orchestrator.getState().completed.has("1")).toBe(true); + }); +}); + +// --------------------------------------------------------------------------- +// Helpers +// --------------------------------------------------------------------------- + +function createOrchestrator(overrides?: { + stages?: StagesConfig | null; + now?: () => Date; +}) { + const stages = overrides?.stages !== undefined ? overrides.stages : null; + + const options: OrchestratorCoreOptions = { + config: createConfig({ stages }), + tracker: createTracker({ + candidates: [createIssue({ id: "1", identifier: "ISSUE-1" })], + }), + spawnWorker: async () => ({ + workerHandle: { pid: 1001 }, + monitorHandle: { ref: "monitor-1" }, + }), + now: overrides?.now ?? (() => new Date("2026-03-06T00:00:05.000Z")), + }; + + return new OrchestratorCore(options); +} + +function createTracker(input?: { candidates?: Issue[] }): IssueTracker { + return { + async fetchCandidateIssues() { + return ( + input?.candidates ?? 
[createIssue({ id: "1", identifier: "ISSUE-1" })] + ); + }, + async fetchIssuesByStates() { + return []; + }, + async fetchIssueStatesByIds() { + return []; + }, + }; +} + +function createConfig(overrides?: { + stages?: StagesConfig | null; +}): ResolvedWorkflowConfig { + return { + workflowPath: "/tmp/WORKFLOW.md", + promptTemplate: "Prompt", + tracker: { + kind: "linear", + endpoint: "https://api.linear.app/graphql", + apiKey: "token", + projectSlug: "project", + activeStates: ["Todo", "In Progress", "In Review"], + terminalStates: ["Done", "Canceled"], + }, + polling: { + intervalMs: 30_000, + }, + workspace: { + root: "/tmp/workspaces", + }, + hooks: { + afterCreate: null, + beforeRun: null, + afterRun: null, + beforeRemove: null, + timeoutMs: 30_000, + }, + agent: { + maxConcurrentAgents: 2, + maxTurns: 5, + maxRetryBackoffMs: 300_000, + maxRetryAttempts: 5, + maxConcurrentAgentsByState: {}, + }, + codex: { + command: "codex-app-server", + approvalPolicy: "never", + threadSandbox: null, + turnSandboxPolicy: null, + turnTimeoutMs: 300_000, + readTimeoutMs: 30_000, + stallTimeoutMs: 300_000, + }, + server: { + port: null, + slackNotifyChannel: null, + }, + observability: { + dashboardEnabled: true, + refreshMs: 1_000, + renderIntervalMs: 16, + }, + runner: { + kind: "codex", + model: null, + }, + stages: overrides?.stages ?? null, + escalationState: null, + }; +} + +function createIssue(overrides?: Partial<Issue>): Issue { + return { + id: overrides?.id ?? "1", + identifier: overrides?.identifier ?? "ISSUE-1", + title: overrides?.title ?? "Example issue", + description: overrides?.description ?? null, + priority: overrides?.priority ?? 1, + state: overrides?.state ?? "In Progress", + branchName: overrides?.branchName ?? null, + url: overrides?.url ?? null, + labels: overrides?.labels ?? [], + blockedBy: overrides?.blockedBy ?? [], + createdAt: overrides?.createdAt ?? "2026-03-01T00:00:00.000Z", + updatedAt: overrides?.updatedAt ?? 
"2026-03-01T00:00:00.000Z", + }; +} + +/** Two agent stages followed by a terminal stage — used to test second dispatch. */ +function createTwoAgentStageConfig(): StagesConfig { + return { + initialStage: "investigate", + fastTrack: null, + stages: { + investigate: { + type: "agent", + runner: "claude-code", + model: "claude-opus-4", + prompt: "investigate.liquid", + maxTurns: 8, + timeoutMs: null, + concurrency: null, + gateType: null, + maxRework: null, + reviewers: [], + transitions: { + onComplete: "implement", + onApprove: null, + onRework: null, + }, + linearState: null, + }, + implement: { + type: "agent", + runner: "claude-code", + model: "claude-sonnet-4-5", + prompt: "implement.liquid", + maxTurns: 30, + timeoutMs: null, + concurrency: null, + gateType: null, + maxRework: null, + reviewers: [], + transitions: { + onComplete: null, + onApprove: null, + onRework: null, + }, + linearState: null, + }, + }, + }; +} + +/** One agent stage leading to a terminal stage — used to test cleanup. 
*/ +function createTerminalStageConfig(): StagesConfig { + return { + initialStage: "implement", + fastTrack: null, + stages: { + implement: { + type: "agent", + runner: "claude-code", + model: "claude-sonnet-4-5", + prompt: "implement.liquid", + maxTurns: 30, + timeoutMs: null, + concurrency: null, + gateType: null, + maxRework: null, + reviewers: [], + transitions: { + onComplete: "done", + onApprove: null, + onRework: null, + }, + linearState: null, + }, + done: { + type: "terminal", + runner: null, + model: null, + prompt: null, + maxTurns: null, + timeoutMs: null, + concurrency: null, + gateType: null, + maxRework: null, + reviewers: [], + transitions: { onComplete: null, onApprove: null, onRework: null }, + linearState: null, + }, + }, + }; +} diff --git a/tests/orchestrator/failure-signals.test.ts b/tests/orchestrator/failure-signals.test.ts new file mode 100644 index 00000000..5cae4e10 --- /dev/null +++ b/tests/orchestrator/failure-signals.test.ts @@ -0,0 +1,1662 @@ +import { describe, expect, it, vi } from "vitest"; + +import type { + ResolvedWorkflowConfig, + StageDefinition, + StagesConfig, +} from "../../src/config/types.js"; +import type { Issue } from "../../src/domain/model.js"; +import { + OrchestratorCore, + type OrchestratorCoreOptions, +} from "../../src/orchestrator/core.js"; +import type { IssueTracker } from "../../src/tracker/tracker.js"; + +describe("failure signal routing in onWorkerExit", () => { + it("advances stage normally when no failure signal is present", async () => { + const orchestrator = createStagedOrchestrator(); + + await orchestrator.pollTick(); + expect(orchestrator.getState().issueStages["1"]).toBe("investigate"); + + const retryEntry = orchestrator.onWorkerExit({ + issueId: "1", + outcome: "normal", + agentMessage: "[STAGE_COMPLETE]", + }); + + expect(orchestrator.getState().issueStages["1"]).toBe("implement"); + expect(retryEntry).not.toBeNull(); + expect(retryEntry!.error).toBeNull(); + }); + + it("advances stage 
normally when agentMessage is undefined", async () => { + const orchestrator = createStagedOrchestrator(); + + await orchestrator.pollTick(); + orchestrator.onWorkerExit({ + issueId: "1", + outcome: "normal", + }); + + expect(orchestrator.getState().issueStages["1"]).toBe("implement"); + }); + + it("schedules retry with backoff on [STAGE_FAILED: verify]", async () => { + const orchestrator = createStagedOrchestrator(); + + await orchestrator.pollTick(); + const retryEntry = orchestrator.onWorkerExit({ + issueId: "1", + outcome: "normal", + agentMessage: "Tests failed.\n[STAGE_FAILED: verify]\nSee logs.", + }); + + // Stage should NOT advance — stays at investigate + expect(orchestrator.getState().issueStages["1"]).toBe("investigate"); + expect(retryEntry).not.toBeNull(); + expect(retryEntry!.error).toBe("agent reported failure: verify"); + }); + + it("schedules retry with backoff on [STAGE_FAILED: infra]", async () => { + const orchestrator = createStagedOrchestrator(); + + await orchestrator.pollTick(); + const retryEntry = orchestrator.onWorkerExit({ + issueId: "1", + outcome: "normal", + agentMessage: "[STAGE_FAILED: infra]", + }); + + expect(orchestrator.getState().issueStages["1"]).toBe("investigate"); + expect(retryEntry).not.toBeNull(); + expect(retryEntry!.error).toBe("agent reported failure: infra"); + }); + + it("escalates immediately on [STAGE_FAILED: spec] — no retry", async () => { + const orchestrator = createStagedOrchestrator(); + + await orchestrator.pollTick(); + const retryEntry = orchestrator.onWorkerExit({ + issueId: "1", + outcome: "normal", + agentMessage: "[STAGE_FAILED: spec]", + }); + + expect(retryEntry).toBeNull(); + expect(orchestrator.getState().failed.has("1")).toBe(true); + expect(orchestrator.getState().claimed.has("1")).toBe(false); + expect(orchestrator.getState().issueStages["1"]).toBeUndefined(); + expect(orchestrator.getState().issueReworkCounts["1"]).toBeUndefined(); + }); + + it("prevents redispatch of escalated issues still 
in Blocked state", async () => { + // After escalation, Linear state becomes "Blocked". The completed flag + // keeps the issue blocked while it remains in the escalation state. + let issueState = "In Progress"; + const orchestrator = createStagedOrchestrator({ + escalationState: "Blocked", + candidates: [ + createIssue({ id: "1", identifier: "ISSUE-1", state: issueState }), + ], + trackerFactory: () => + createTracker({ + candidatesFn: () => [ + createIssue({ id: "1", identifier: "ISSUE-1", state: issueState }), + ], + }), + }); + + await orchestrator.pollTick(); + // Simulate escalation side-effect moving issue to Blocked + issueState = "Blocked"; + orchestrator.onWorkerExit({ + issueId: "1", + outcome: "normal", + agentMessage: "[STAGE_FAILED: spec]", + }); + + expect(orchestrator.getState().failed.has("1")).toBe(true); + + const result = await orchestrator.pollTick(); + expect(result.dispatchedIssueIds).not.toContain("1"); + expect(orchestrator.getState().running["1"]).toBeUndefined(); + }); + + it("allows redispatch of resumed issues moved out of Blocked state", async () => { + let issueState = "In Progress"; + const orchestrator = createStagedOrchestrator({ + escalationState: "Blocked", + trackerFactory: () => + createTracker({ + candidatesFn: () => [ + createIssue({ id: "1", identifier: "ISSUE-1", state: issueState }), + ], + }), + }); + + await orchestrator.pollTick(); + issueState = "Blocked"; + orchestrator.onWorkerExit({ + issueId: "1", + outcome: "normal", + agentMessage: "[STAGE_FAILED: spec]", + }); + expect(orchestrator.getState().failed.has("1")).toBe(true); + + // Human moves issue to "Resume" → next poll should re-dispatch + issueState = "Todo"; + const result = await orchestrator.pollTick(); + expect(result.dispatchedIssueIds).toContain("1"); + expect(orchestrator.getState().failed.has("1")).toBe(false); + }); + + it("triggers rework on [STAGE_FAILED: review] with gate workflow", async () => { + const orchestrator = createStagedOrchestrator({ + 
stages: createGateWorkflowConfig(), + }); + + // First dispatch puts issue in "implement" stage + await orchestrator.pollTick(); + expect(orchestrator.getState().issueStages["1"]).toBe("implement"); + + const retryEntry = orchestrator.onWorkerExit({ + issueId: "1", + outcome: "normal", + agentMessage: "[STAGE_FAILED: review]", + }); + + // Should rework back to implement (gate's onRework target) + expect(orchestrator.getState().issueStages["1"]).toBe("implement"); + expect(orchestrator.getState().issueReworkCounts["1"]).toBe(1); + expect(retryEntry).not.toBeNull(); + expect(retryEntry!.error).toBe("agent review failure: rework to implement"); + }); + + it("escalates review failure when max rework exceeded", async () => { + const base = createGateWorkflowConfig(); + const stages: StagesConfig = { + ...base, + stages: { + ...base.stages, + review: { ...base.stages.review!, maxRework: 1 }, + }, + }; + + const orchestrator = createStagedOrchestrator({ stages }); + + await orchestrator.pollTick(); + + // First review failure — rework (count 1 of max 1) + orchestrator.onWorkerExit({ + issueId: "1", + outcome: "normal", + agentMessage: "[STAGE_FAILED: review]", + }); + expect(orchestrator.getState().issueReworkCounts["1"]).toBe(1); + expect(orchestrator.getState().issueStages["1"]).toBe("implement"); + + // Re-dispatch from rework + await orchestrator.onRetryTimer("1"); + + // Second review failure — should escalate (count would exceed max) + const retryEntry = orchestrator.onWorkerExit({ + issueId: "1", + outcome: "normal", + agentMessage: "[STAGE_FAILED: review]", + }); + + expect(retryEntry).toBeNull(); + expect(orchestrator.getState().failed.has("1")).toBe(true); + expect(orchestrator.getState().issueStages["1"]).toBeUndefined(); + expect(orchestrator.getState().issueReworkCounts["1"]).toBeUndefined(); + }); + + it("falls back to retry for review failure when no stages configured", async () => { + const orchestrator = createStagedOrchestrator({ stages: null }); + + 
await orchestrator.pollTick(); + const retryEntry = orchestrator.onWorkerExit({ + issueId: "1", + outcome: "normal", + agentMessage: "[STAGE_FAILED: review]", + }); + + expect(retryEntry).not.toBeNull(); + expect(retryEntry!.error).toBe("agent reported failure: review"); + }); + + it("falls back to retry for review failure when no downstream gate exists", async () => { + // Three stage config has no gate stages + const orchestrator = createStagedOrchestrator(); + + await orchestrator.pollTick(); + const retryEntry = orchestrator.onWorkerExit({ + issueId: "1", + outcome: "normal", + agentMessage: "[STAGE_FAILED: review]", + }); + + // No gate found → falls back to retry + expect(retryEntry).not.toBeNull(); + expect(retryEntry!.error).toBe("agent reported failure: review"); + }); + + it("does not parse failure signals on abnormal exits", async () => { + const orchestrator = createStagedOrchestrator(); + + await orchestrator.pollTick(); + const retryEntry = orchestrator.onWorkerExit({ + issueId: "1", + outcome: "abnormal", + reason: "process crashed", + agentMessage: "[STAGE_FAILED: spec]", + }); + + // Abnormal exit should use existing retry behavior, ignoring failure signal + expect(retryEntry).not.toBeNull(); + expect(retryEntry!.error).toBe("worker exited: process crashed"); + expect(orchestrator.getState().issueStages["1"]).toBe("investigate"); + }); + + it("increments rework count across multiple review failures", async () => { + const orchestrator = createStagedOrchestrator({ + stages: createGateWorkflowConfig(), + }); + + await orchestrator.pollTick(); + + // First review failure + orchestrator.onWorkerExit({ + issueId: "1", + outcome: "normal", + agentMessage: "[STAGE_FAILED: review]", + }); + expect(orchestrator.getState().issueReworkCounts["1"]).toBe(1); + + // Re-dispatch + await orchestrator.onRetryTimer("1"); + + // Second review failure + orchestrator.onWorkerExit({ + issueId: "1", + outcome: "normal", + agentMessage: "[STAGE_FAILED: review]", + }); + 
expect(orchestrator.getState().issueReworkCounts["1"]).toBe(2); + }); + + it("passes correct reworkCount to spawnWorker during rework cycle", async () => { + const spawnCalls: Array<{ reworkCount: number }> = []; + const orchestrator = createStagedOrchestrator({ + stages: createGateWorkflowConfig(), + onSpawn: (input) => { + spawnCalls.push({ reworkCount: input.reworkCount }); + }, + }); + + // Initial dispatch — reworkCount should be 0 + await orchestrator.pollTick(); + expect(spawnCalls).toHaveLength(1); + expect(spawnCalls[0]!.reworkCount).toBe(0); + + // First review failure → rework + orchestrator.onWorkerExit({ + issueId: "1", + outcome: "normal", + agentMessage: "[STAGE_FAILED: review]", + }); + await orchestrator.onRetryTimer("1"); + expect(spawnCalls).toHaveLength(2); + expect(spawnCalls[1]!.reworkCount).toBe(1); + + // Second review failure → rework + orchestrator.onWorkerExit({ + issueId: "1", + outcome: "normal", + agentMessage: "[STAGE_FAILED: review]", + }); + await orchestrator.onRetryTimer("1"); + expect(spawnCalls).toHaveLength(3); + expect(spawnCalls[2]!.reworkCount).toBe(2); + }); + + it("calls updateIssueState on spec failure when escalationState is configured", async () => { + const updateIssueState = vi.fn().mockResolvedValue(undefined); + const postComment = vi.fn().mockResolvedValue(undefined); + + const orchestrator = createStagedOrchestrator({ + escalationState: "Blocked", + updateIssueState, + postComment, + }); + + await orchestrator.pollTick(); + orchestrator.onWorkerExit({ + issueId: "1", + outcome: "normal", + agentMessage: "[STAGE_FAILED: spec]", + }); + + // Allow async side effects to fire + await new Promise((resolve) => setTimeout(resolve, 10)); + + expect(updateIssueState).toHaveBeenCalledWith("1", "ISSUE-1", "Blocked"); + expect(postComment).toHaveBeenCalledWith( + "1", + expect.stringContaining("spec failure"), + ); + }); + + it("calls updateIssueState on review escalation when escalationState is configured", async () => { + 
const updateIssueState = vi.fn().mockResolvedValue(undefined); + const postComment = vi.fn().mockResolvedValue(undefined); + + const base = createGateWorkflowConfig(); + const stages: StagesConfig = { + ...base, + stages: { + ...base.stages, + review: { ...base.stages.review!, maxRework: 0 }, + }, + }; + + const orchestrator = createStagedOrchestrator({ + stages, + escalationState: "Blocked", + updateIssueState, + postComment, + }); + + await orchestrator.pollTick(); + orchestrator.onWorkerExit({ + issueId: "1", + outcome: "normal", + agentMessage: "[STAGE_FAILED: review]", + }); + + // Allow async side effects to fire + await new Promise((resolve) => setTimeout(resolve, 10)); + + expect(updateIssueState).toHaveBeenCalledWith("1", "ISSUE-1", "Blocked"); + expect(postComment).toHaveBeenCalledWith( + "1", + expect.stringContaining("max rework"), + ); + }); + + it("does not call updateIssueState when escalationState is null", async () => { + const updateIssueState = vi.fn().mockResolvedValue(undefined); + + const orchestrator = createStagedOrchestrator({ + escalationState: null, + updateIssueState, + }); + + await orchestrator.pollTick(); + orchestrator.onWorkerExit({ + issueId: "1", + outcome: "normal", + agentMessage: "[STAGE_FAILED: spec]", + }); + + // Allow async side effects to fire + await new Promise((resolve) => setTimeout(resolve, 10)); + + expect(updateIssueState).not.toHaveBeenCalled(); + }); +}); + +describe("agent-type review stage rework routing", () => { + it("triggers rework on [STAGE_FAILED: review] from agent-type stage with onRework", async () => { + const orchestrator = createStagedOrchestrator({ + stages: createAgentReviewWorkflowConfig(), + }); + + // First dispatch puts issue in "implement" stage + await orchestrator.pollTick(); + expect(orchestrator.getState().issueStages["1"]).toBe("implement"); + + // Normal exit advances to "review" (agent-type with onRework) + orchestrator.onWorkerExit({ issueId: "1", outcome: "normal" }); + 
expect(orchestrator.getState().issueStages["1"]).toBe("review"); + + // Re-dispatch review agent + await orchestrator.onRetryTimer("1"); + + // Review agent reports failure + const retryEntry = orchestrator.onWorkerExit({ + issueId: "1", + outcome: "normal", + agentMessage: "[STAGE_FAILED: review]", + }); + + // Should rework back to implement (agent stage's onRework target) + expect(orchestrator.getState().issueStages["1"]).toBe("implement"); + expect(orchestrator.getState().issueReworkCounts["1"]).toBe(1); + expect(retryEntry).not.toBeNull(); + expect(retryEntry!.error).toBe("agent review failure: rework to implement"); + }); + + it("increments reworkCount across multiple agent review→implement cycles", async () => { + const orchestrator = createStagedOrchestrator({ + stages: createAgentReviewWorkflowConfig(), + }); + + await orchestrator.pollTick(); + + // Advance through implement → review + orchestrator.onWorkerExit({ issueId: "1", outcome: "normal" }); + await orchestrator.onRetryTimer("1"); + + // First review failure + orchestrator.onWorkerExit({ + issueId: "1", + outcome: "normal", + agentMessage: "[STAGE_FAILED: review]", + }); + expect(orchestrator.getState().issueReworkCounts["1"]).toBe(1); + expect(orchestrator.getState().issueStages["1"]).toBe("implement"); + + // Re-dispatch implement, advance back to review + await orchestrator.onRetryTimer("1"); + orchestrator.onWorkerExit({ issueId: "1", outcome: "normal" }); + await orchestrator.onRetryTimer("1"); + + // Second review failure + orchestrator.onWorkerExit({ + issueId: "1", + outcome: "normal", + agentMessage: "[STAGE_FAILED: review]", + }); + expect(orchestrator.getState().issueReworkCounts["1"]).toBe(2); + expect(orchestrator.getState().issueStages["1"]).toBe("implement"); + }); + + it("escalates when maxRework exceeded on agent-type review stage", async () => { + const base = createAgentReviewWorkflowConfig(); + const stages: StagesConfig = { + ...base, + stages: { + ...base.stages, + review: { 
...base.stages.review!, maxRework: 1 }, + }, + }; + + const updateIssueState = vi.fn().mockResolvedValue(undefined); + const postComment = vi.fn().mockResolvedValue(undefined); + + const orchestrator = createStagedOrchestrator({ + stages, + escalationState: "Blocked", + updateIssueState, + postComment, + }); + + await orchestrator.pollTick(); + + // Advance to review + orchestrator.onWorkerExit({ issueId: "1", outcome: "normal" }); + await orchestrator.onRetryTimer("1"); + + // First review failure — rework (count 1 of max 1) + orchestrator.onWorkerExit({ + issueId: "1", + outcome: "normal", + agentMessage: "[STAGE_FAILED: review]", + }); + expect(orchestrator.getState().issueReworkCounts["1"]).toBe(1); + expect(orchestrator.getState().issueStages["1"]).toBe("implement"); + + // Re-dispatch implement, advance back to review + await orchestrator.onRetryTimer("1"); + orchestrator.onWorkerExit({ issueId: "1", outcome: "normal" }); + await orchestrator.onRetryTimer("1"); + + // Second review failure — should escalate (count would exceed max) + const retryEntry = orchestrator.onWorkerExit({ + issueId: "1", + outcome: "normal", + agentMessage: "[STAGE_FAILED: review]", + }); + + expect(retryEntry).toBeNull(); + expect(orchestrator.getState().failed.has("1")).toBe(true); + expect(orchestrator.getState().issueStages["1"]).toBeUndefined(); + expect(orchestrator.getState().issueReworkCounts["1"]).toBeUndefined(); + + // Allow async side effects to fire + await new Promise((resolve) => setTimeout(resolve, 10)); + + expect(updateIssueState).toHaveBeenCalledWith("1", "ISSUE-1", "Blocked"); + expect(postComment).toHaveBeenCalledWith( + "1", + expect.stringContaining("max rework"), + ); + }); + + it("routes implement-stage review failure through downstream agent-type review stage with onRework", async () => { + const orchestrator = createStagedOrchestrator({ + stages: createAgentReviewWorkflowConfig(), + }); + + // Dispatch puts issue in "implement" stage + await 
orchestrator.pollTick(); + expect(orchestrator.getState().issueStages["1"]).toBe("implement"); + + // Implement agent reports [STAGE_FAILED: review] — should find downstream + // agent-type review stage via findDownstreamGate and use its onRework + const retryEntry = orchestrator.onWorkerExit({ + issueId: "1", + outcome: "normal", + agentMessage: "[STAGE_FAILED: review]", + }); + + // Should rework back to implement via the downstream review stage's onRework + expect(orchestrator.getState().issueStages["1"]).toBe("implement"); + expect(orchestrator.getState().issueReworkCounts["1"]).toBe(1); + expect(retryEntry).not.toBeNull(); + expect(retryEntry!.error).toBe("agent review failure: rework to implement"); + }); + + it("agent-type stage WITHOUT onRework falls back to retry on review failure", async () => { + // Three-stage config has no onRework on any stage and no gate stages + const orchestrator = createStagedOrchestrator(); + + await orchestrator.pollTick(); + const retryEntry = orchestrator.onWorkerExit({ + issueId: "1", + outcome: "normal", + agentMessage: "[STAGE_FAILED: review]", + }); + + // No onRework, no downstream gate → falls back to retry + expect(retryEntry).not.toBeNull(); + expect(retryEntry!.error).toBe("agent reported failure: review"); + }); + + it("passes correct reworkCount to spawnWorker during agent review rework cycle", async () => { + const spawnCalls: Array<{ reworkCount: number; stageName: string | null }> = + []; + const orchestrator = createStagedOrchestrator({ + stages: createAgentReviewWorkflowConfig(), + onSpawn: (input) => { + spawnCalls.push({ + reworkCount: input.reworkCount, + stageName: input.stageName, + }); + }, + }); + + // Initial dispatch — implement stage, reworkCount 0 + await orchestrator.pollTick(); + expect(spawnCalls).toHaveLength(1); + expect(spawnCalls[0]!.reworkCount).toBe(0); + expect(spawnCalls[0]!.stageName).toBe("implement"); + + // Advance to review + orchestrator.onWorkerExit({ issueId: "1", outcome: "normal" 
}); + await orchestrator.onRetryTimer("1"); + expect(spawnCalls).toHaveLength(2); + expect(spawnCalls[1]!.stageName).toBe("review"); + + // Review fails → rework to implement + orchestrator.onWorkerExit({ + issueId: "1", + outcome: "normal", + agentMessage: "[STAGE_FAILED: review]", + }); + await orchestrator.onRetryTimer("1"); + expect(spawnCalls).toHaveLength(3); + expect(spawnCalls[2]!.reworkCount).toBe(1); + expect(spawnCalls[2]!.stageName).toBe("implement"); + }); +}); + +describe("review findings comment posting on agent review failure", () => { + it("posts review findings comment on agent review failure", async () => { + const postComment = vi.fn().mockResolvedValue(undefined); + + const orchestrator = createStagedOrchestrator({ + stages: createAgentReviewWorkflowConfig(), + postComment, + }); + + // Dispatch to implement stage + await orchestrator.pollTick(); + expect(orchestrator.getState().issueStages["1"]).toBe("implement"); + + // Advance to review stage + orchestrator.onWorkerExit({ issueId: "1", outcome: "normal" }); + expect(orchestrator.getState().issueStages["1"]).toBe("review"); + await orchestrator.onRetryTimer("1"); + + // Review agent reports failure with message + orchestrator.onWorkerExit({ + issueId: "1", + outcome: "normal", + agentMessage: + "Missing null check in handler.ts line 42\n[STAGE_FAILED: review]", + }); + + // Allow async side effects to fire + await new Promise((resolve) => setTimeout(resolve, 10)); + + expect(postComment).toHaveBeenCalledWith( + "1", + expect.stringContaining("## Review Findings"), + ); + }); + + it("review findings comment includes agent message", async () => { + const postComment = vi.fn().mockResolvedValue(undefined); + + const orchestrator = createStagedOrchestrator({ + stages: createAgentReviewWorkflowConfig(), + postComment, + }); + + await orchestrator.pollTick(); + + // Advance to review stage + orchestrator.onWorkerExit({ issueId: "1", outcome: "normal" }); + await orchestrator.onRetryTimer("1"); + + 
// Review agent reports failure with specific message + orchestrator.onWorkerExit({ + issueId: "1", + outcome: "normal", + agentMessage: + "Missing null check in handler.ts line 42\n[STAGE_FAILED: review]", + }); + + // Allow async side effects to fire + await new Promise((resolve) => setTimeout(resolve, 10)); + + expect(postComment).toHaveBeenCalledTimes(1); + const commentBody = postComment.mock.calls[0]![1] as string; + expect(commentBody).toContain("Missing null check in handler.ts line 42"); + expect(commentBody).toContain("review"); + }); + + it("review failure triggers rework after posting comment", async () => { + const postComment = vi.fn().mockResolvedValue(undefined); + + const orchestrator = createStagedOrchestrator({ + stages: createAgentReviewWorkflowConfig(), + postComment, + }); + + await orchestrator.pollTick(); + + // Advance to review stage + orchestrator.onWorkerExit({ issueId: "1", outcome: "normal" }); + await orchestrator.onRetryTimer("1"); + + // Review agent reports failure + const retryEntry = orchestrator.onWorkerExit({ + issueId: "1", + outcome: "normal", + agentMessage: + "Missing null check in handler.ts line 42\n[STAGE_FAILED: review]", + }); + + // Should rework back to implement + expect(orchestrator.getState().issueStages["1"]).toBe("implement"); + expect(orchestrator.getState().issueReworkCounts["1"]).toBe(1); + expect(retryEntry).not.toBeNull(); + expect(retryEntry!.error).toBe("agent review failure: rework to implement"); + + // Allow async side effects to fire + await new Promise((resolve) => setTimeout(resolve, 10)); + + // Comment was posted before rework + expect(postComment).toHaveBeenCalledWith( + "1", + expect.stringContaining("## Review Findings"), + ); + }); + + it("does not let comment posting failure affect rework flow", async () => { + const postComment = vi.fn().mockRejectedValue(new Error("network error")); + + const orchestrator = createStagedOrchestrator({ + stages: createAgentReviewWorkflowConfig(), + 
postComment, + }); + + await orchestrator.pollTick(); + + // Advance to review stage + orchestrator.onWorkerExit({ issueId: "1", outcome: "normal" }); + await orchestrator.onRetryTimer("1"); + + // Review agent reports failure — comment posting will fail + const retryEntry = orchestrator.onWorkerExit({ + issueId: "1", + outcome: "normal", + agentMessage: "[STAGE_FAILED: review]", + }); + + // Rework should still succeed despite comment failure + expect(orchestrator.getState().issueStages["1"]).toBe("implement"); + expect(retryEntry).not.toBeNull(); + expect(retryEntry!.error).toBe("agent review failure: rework to implement"); + + // Allow async side effects to fire (and fail silently) + await new Promise((resolve) => setTimeout(resolve, 10)); + + expect(postComment).toHaveBeenCalled(); + }); + + it("review findings comment failure does not block rework", async () => { + const postComment = vi.fn().mockRejectedValue(new Error("network error")); + + const orchestrator = createStagedOrchestrator({ + stages: createAgentReviewWorkflowConfig(), + postComment, + }); + + await orchestrator.pollTick(); + + // Advance to review stage + orchestrator.onWorkerExit({ issueId: "1", outcome: "normal" }); + await orchestrator.onRetryTimer("1"); + + // Review agent reports failure — comment will fail to post + const retryEntry = orchestrator.onWorkerExit({ + issueId: "1", + outcome: "normal", + agentMessage: "[STAGE_FAILED: review]", + }); + + // Rework must proceed regardless of comment failure + expect(orchestrator.getState().issueStages["1"]).toBe("implement"); + expect(retryEntry).not.toBeNull(); + expect(retryEntry!.error).toBe("agent review failure: rework to implement"); + + // Allow async side effects to fire (and fail silently) + await new Promise((resolve) => setTimeout(resolve, 10)); + + expect(postComment).toHaveBeenCalled(); + }); + + it("postComment error is swallowed for review findings", async () => { + const postComment = vi.fn().mockRejectedValue(new 
Error("timeout")); + + const orchestrator = createStagedOrchestrator({ + stages: createAgentReviewWorkflowConfig(), + postComment, + }); + + await orchestrator.pollTick(); + + // Advance to review stage + orchestrator.onWorkerExit({ issueId: "1", outcome: "normal" }); + await orchestrator.onRetryTimer("1"); + + // Review fails — postComment will throw + let thrownError: unknown = null; + try { + orchestrator.onWorkerExit({ + issueId: "1", + outcome: "normal", + agentMessage: "[STAGE_FAILED: review]", + }); + } catch (err) { + thrownError = err; + } + + // Error must not propagate to caller + expect(thrownError).toBeNull(); + + // Allow async side effects to settle + await new Promise((resolve) => setTimeout(resolve, 10)); + + // postComment was called but the error was swallowed + expect(postComment).toHaveBeenCalled(); + }); + + it("skips review findings when postComment not configured", async () => { + // No postComment wired — orchestrator created without it + const orchestrator = createStagedOrchestrator({ + stages: createAgentReviewWorkflowConfig(), + }); + + await orchestrator.pollTick(); + + // Advance to review stage + orchestrator.onWorkerExit({ issueId: "1", outcome: "normal" }); + await orchestrator.onRetryTimer("1"); + + // Review agent reports failure — no postComment configured + const retryEntry = orchestrator.onWorkerExit({ + issueId: "1", + outcome: "normal", + agentMessage: "[STAGE_FAILED: review]", + }); + + // Rework should still proceed + expect(orchestrator.getState().issueStages["1"]).toBe("implement"); + expect(retryEntry).not.toBeNull(); + expect(retryEntry!.error).toBe("agent review failure: rework to implement"); + }); + + it("escalation fires on max rework exceeded", async () => { + const base = createAgentReviewWorkflowConfig(); + const stages: StagesConfig = { + ...base, + stages: { + ...base.stages, + review: { ...base.stages.review!, maxRework: 1 }, + }, + }; + + const updateIssueState = vi.fn().mockResolvedValue(undefined); + const 
postComment = vi.fn().mockResolvedValue(undefined); + + const orchestrator = createStagedOrchestrator({ + stages, + escalationState: "Blocked", + updateIssueState, + postComment, + }); + + await orchestrator.pollTick(); + + // Advance to review + orchestrator.onWorkerExit({ issueId: "1", outcome: "normal" }); + await orchestrator.onRetryTimer("1"); + + // First review failure — rework (count 1 of max 1) + orchestrator.onWorkerExit({ + issueId: "1", + outcome: "normal", + agentMessage: "[STAGE_FAILED: review]", + }); + await orchestrator.onRetryTimer("1"); + orchestrator.onWorkerExit({ issueId: "1", outcome: "normal" }); + await orchestrator.onRetryTimer("1"); + + // Second review failure — should escalate + const retryEntry = orchestrator.onWorkerExit({ + issueId: "1", + outcome: "normal", + agentMessage: "[STAGE_FAILED: review]", + }); + + expect(retryEntry).toBeNull(); + expect(orchestrator.getState().failed.has("1")).toBe(true); + + // Allow async side effects to fire + await new Promise((resolve) => setTimeout(resolve, 10)); + + expect(updateIssueState).toHaveBeenCalledWith("1", "ISSUE-1", "Blocked"); + expect(postComment).toHaveBeenCalledWith( + "1", + expect.stringContaining("max rework"), + ); + }); + + it("no review findings on escalation", async () => { + const base = createAgentReviewWorkflowConfig(); + const stages: StagesConfig = { + ...base, + stages: { + ...base.stages, + review: { ...base.stages.review!, maxRework: 1 }, + }, + }; + + const postComment = vi.fn().mockResolvedValue(undefined); + + const orchestrator = createStagedOrchestrator({ + stages, + escalationState: "Blocked", + postComment, + }); + + await orchestrator.pollTick(); + + // Advance to review + orchestrator.onWorkerExit({ issueId: "1", outcome: "normal" }); + await orchestrator.onRetryTimer("1"); + + // First review failure — rework + orchestrator.onWorkerExit({ + issueId: "1", + outcome: "normal", + agentMessage: "[STAGE_FAILED: review]", + }); + + // Allow the review findings 
comment to fire for the first failure + await new Promise((resolve) => setTimeout(resolve, 10)); + postComment.mockClear(); + + await orchestrator.onRetryTimer("1"); + orchestrator.onWorkerExit({ issueId: "1", outcome: "normal" }); + await orchestrator.onRetryTimer("1"); + + // Second review failure — escalation (max exceeded) + orchestrator.onWorkerExit({ + issueId: "1", + outcome: "normal", + agentMessage: "[STAGE_FAILED: review]", + }); + + // Allow async side effects to fire + await new Promise((resolve) => setTimeout(resolve, 10)); + + // Only the escalation comment should have been posted — not a review findings comment + expect(postComment).toHaveBeenCalledTimes(1); + expect(postComment).toHaveBeenCalledWith( + "1", + expect.stringContaining("max rework"), + ); + expect(postComment).not.toHaveBeenCalledWith( + "1", + expect.stringContaining("## Review Findings"), + ); + }); +}); + +describe("rebase failure signal routing", () => { + it("triggers rework on [STAGE_FAILED: rebase] with onRework configured", async () => { + const orchestrator = createStagedOrchestrator({ + stages: createMergeWithRebaseWorkflowConfig(), + }); + + // Dispatch to implement stage + await orchestrator.pollTick(); + expect(orchestrator.getState().issueStages["1"]).toBe("implement"); + + // Advance implement → merge + orchestrator.onWorkerExit({ issueId: "1", outcome: "normal" }); + expect(orchestrator.getState().issueStages["1"]).toBe("merge"); + await orchestrator.onRetryTimer("1"); + + // Merge agent reports rebase failure + const retryEntry = orchestrator.onWorkerExit({ + issueId: "1", + outcome: "normal", + agentMessage: "[STAGE_FAILED: rebase]", + }); + + // Should rework back to implement (merge stage's onRework target) + expect(orchestrator.getState().issueStages["1"]).toBe("implement"); + expect(orchestrator.getState().issueReworkCounts["1"]).toBe(1); + expect(retryEntry).not.toBeNull(); + expect(retryEntry!.error).toBe("rebase failure: rework to implement"); + }); + + 
it("increments rework count on rebase failure", async () => { + const orchestrator = createStagedOrchestrator({ + stages: createMergeWithRebaseWorkflowConfig(), + }); + + await orchestrator.pollTick(); + + // Advance to merge + orchestrator.onWorkerExit({ issueId: "1", outcome: "normal" }); + await orchestrator.onRetryTimer("1"); + + // First rebase failure + orchestrator.onWorkerExit({ + issueId: "1", + outcome: "normal", + agentMessage: "[STAGE_FAILED: rebase]", + }); + expect(orchestrator.getState().issueReworkCounts["1"]).toBe(1); + + // Re-dispatch implement, advance back to merge + await orchestrator.onRetryTimer("1"); + orchestrator.onWorkerExit({ issueId: "1", outcome: "normal" }); + await orchestrator.onRetryTimer("1"); + + // Second rebase failure + orchestrator.onWorkerExit({ + issueId: "1", + outcome: "normal", + agentMessage: "[STAGE_FAILED: rebase]", + }); + expect(orchestrator.getState().issueReworkCounts["1"]).toBe(2); + }); + + it("escalates when max rework exceeded on rebase failure", async () => { + const base = createMergeWithRebaseWorkflowConfig(); + const stages: StagesConfig = { + ...base, + stages: { + ...base.stages, + merge: { ...base.stages.merge!, maxRework: 1 }, + }, + }; + + const updateIssueState = vi.fn().mockResolvedValue(undefined); + const postComment = vi.fn().mockResolvedValue(undefined); + + const orchestrator = createStagedOrchestrator({ + stages, + escalationState: "Blocked", + updateIssueState, + postComment, + }); + + await orchestrator.pollTick(); + + // Advance to merge + orchestrator.onWorkerExit({ issueId: "1", outcome: "normal" }); + await orchestrator.onRetryTimer("1"); + + // First rebase failure — rework (count 1 of max 1) + orchestrator.onWorkerExit({ + issueId: "1", + outcome: "normal", + agentMessage: "[STAGE_FAILED: rebase]", + }); + expect(orchestrator.getState().issueReworkCounts["1"]).toBe(1); + + // Re-dispatch implement, advance back to merge + await orchestrator.onRetryTimer("1"); + 
orchestrator.onWorkerExit({ issueId: "1", outcome: "normal" }); + await orchestrator.onRetryTimer("1"); + + // Second rebase failure — should escalate + const retryEntry = orchestrator.onWorkerExit({ + issueId: "1", + outcome: "normal", + agentMessage: "[STAGE_FAILED: rebase]", + }); + + expect(retryEntry).toBeNull(); + expect(orchestrator.getState().failed.has("1")).toBe(true); + expect(orchestrator.getState().issueStages["1"]).toBeUndefined(); + expect(orchestrator.getState().issueReworkCounts["1"]).toBeUndefined(); + + // Allow async side effects to fire + await new Promise((resolve) => setTimeout(resolve, 10)); + + expect(updateIssueState).toHaveBeenCalledWith("1", "ISSUE-1", "Blocked"); + expect(postComment).toHaveBeenCalledWith( + "1", + expect.stringContaining("max rework"), + ); + }); + + it("posts a Rebase Needed comment on rebase failure with onRework", async () => { + const postComment = vi.fn().mockResolvedValue(undefined); + + const orchestrator = createStagedOrchestrator({ + stages: createMergeWithRebaseWorkflowConfig(), + postComment, + }); + + await orchestrator.pollTick(); + + // Advance to merge + orchestrator.onWorkerExit({ issueId: "1", outcome: "normal" }); + await orchestrator.onRetryTimer("1"); + + // Merge agent reports rebase failure with message + orchestrator.onWorkerExit({ + issueId: "1", + outcome: "normal", + agentMessage: "Merge conflict in src/handler.ts\n[STAGE_FAILED: rebase]", + }); + + // Allow async side effects to fire + await new Promise((resolve) => setTimeout(resolve, 10)); + + expect(postComment).toHaveBeenCalledWith( + "1", + expect.stringContaining("## Rebase Needed"), + ); + }); + + it("falls back to retry for rebase failure when no onRework configured", async () => { + // Three-stage config has no onRework on any stage + const orchestrator = createStagedOrchestrator(); + + await orchestrator.pollTick(); + const retryEntry = orchestrator.onWorkerExit({ + issueId: "1", + outcome: "normal", + agentMessage: "[STAGE_FAILED: 
rebase]", + }); + + // No onRework → falls back to retry + expect(retryEntry).not.toBeNull(); + expect(retryEntry!.error).toBe("agent reported failure: rebase"); + }); + + it("falls back to retry for rebase failure when no stages configured", async () => { + const orchestrator = createStagedOrchestrator({ stages: null }); + + await orchestrator.pollTick(); + const retryEntry = orchestrator.onWorkerExit({ + issueId: "1", + outcome: "normal", + agentMessage: "[STAGE_FAILED: rebase]", + }); + + expect(retryEntry).not.toBeNull(); + expect(retryEntry!.error).toBe("agent reported failure: rebase"); + }); + + it("shares rework counter with review failures", async () => { + const base = createMergeWithRebaseWorkflowConfig(); + // Add an agent review stage with onRework before merge + const stages: StagesConfig = { + ...base, + stages: { + ...base.stages, + implement: { + ...base.stages.implement!, + transitions: { + onComplete: "review", + onApprove: null, + onRework: null, + }, + }, + review: { + type: "agent", + runner: "claude-code", + model: "claude-opus-4-6", + prompt: "review.liquid", + maxTurns: 15, + timeoutMs: null, + concurrency: null, + gateType: null, + maxRework: 2, + reviewers: [], + transitions: { + onComplete: "merge", + onApprove: null, + onRework: "implement", + }, + linearState: null, + }, + merge: { ...base.stages.merge!, maxRework: 2 }, + }, + }; + + const orchestrator = createStagedOrchestrator({ stages }); + + await orchestrator.pollTick(); + expect(orchestrator.getState().issueStages["1"]).toBe("implement"); + + // Advance implement → review + orchestrator.onWorkerExit({ issueId: "1", outcome: "normal" }); + await orchestrator.onRetryTimer("1"); + + // Two review failures (rework count goes to 2) + orchestrator.onWorkerExit({ + issueId: "1", + outcome: "normal", + agentMessage: "[STAGE_FAILED: review]", + }); + expect(orchestrator.getState().issueReworkCounts["1"]).toBe(1); + + await orchestrator.onRetryTimer("1"); + orchestrator.onWorkerExit({ 
issueId: "1", outcome: "normal" }); + await orchestrator.onRetryTimer("1"); + + orchestrator.onWorkerExit({ + issueId: "1", + outcome: "normal", + agentMessage: "[STAGE_FAILED: review]", + }); + expect(orchestrator.getState().issueReworkCounts["1"]).toBe(2); + + // Now advance through review → merge + await orchestrator.onRetryTimer("1"); + orchestrator.onWorkerExit({ issueId: "1", outcome: "normal" }); + await orchestrator.onRetryTimer("1"); + orchestrator.onWorkerExit({ issueId: "1", outcome: "normal" }); + await orchestrator.onRetryTimer("1"); + + // Rebase failure should escalate because total rework count (3) exceeds max (2) + const retryEntry = orchestrator.onWorkerExit({ + issueId: "1", + outcome: "normal", + agentMessage: "[STAGE_FAILED: rebase]", + }); + + expect(retryEntry).toBeNull(); + expect(orchestrator.getState().failed.has("1")).toBe(true); + }); +}); + +// --- Helpers --- + +function createStagedOrchestrator(overrides?: { + stages?: StagesConfig | null; + candidates?: Issue[]; + escalationState?: string | null; + updateIssueState?: OrchestratorCoreOptions["updateIssueState"]; + postComment?: OrchestratorCoreOptions["postComment"]; + trackerFactory?: () => IssueTracker; + onSpawn?: (input: { + issue: Issue; + attempt: number | null; + stage: StageDefinition | null; + stageName: string | null; + reworkCount: number; + }) => void; +}) { + const stages = + overrides?.stages !== undefined + ? overrides.stages + : createThreeStageConfig(); + + const tracker = + overrides?.trackerFactory?.() ?? + createTracker({ + candidates: overrides?.candidates ?? [ + createIssue({ id: "1", identifier: "ISSUE-1" }), + ], + }); + + const options: OrchestratorCoreOptions = { + config: createConfig({ + stages, + ...(overrides?.escalationState !== undefined + ? 
{ escalationState: overrides.escalationState } + : {}), + }), + tracker, + spawnWorker: async (input) => { + overrides?.onSpawn?.(input); + return { + workerHandle: { pid: 1001 }, + monitorHandle: { ref: "monitor-1" }, + }; + }, + ...(overrides?.updateIssueState !== undefined + ? { updateIssueState: overrides.updateIssueState } + : {}), + ...(overrides?.postComment !== undefined + ? { postComment: overrides.postComment } + : {}), + now: () => new Date("2026-03-06T00:00:05.000Z"), + }; + + return new OrchestratorCore(options); +} + +function createThreeStageConfig(): StagesConfig { + return { + initialStage: "investigate", + fastTrack: null, + stages: { + investigate: { + type: "agent", + runner: "claude-code", + model: "claude-opus-4", + prompt: "investigate.liquid", + maxTurns: 8, + timeoutMs: null, + concurrency: null, + gateType: null, + maxRework: null, + reviewers: [], + transitions: { + onComplete: "implement", + onApprove: null, + onRework: null, + }, + linearState: null, + }, + implement: { + type: "agent", + runner: "claude-code", + model: "claude-sonnet-4-5", + prompt: "implement.liquid", + maxTurns: 30, + timeoutMs: null, + concurrency: null, + gateType: null, + maxRework: null, + reviewers: [], + transitions: { + onComplete: "done", + onApprove: null, + onRework: null, + }, + linearState: null, + }, + done: { + type: "terminal", + runner: null, + model: null, + prompt: null, + maxTurns: null, + timeoutMs: null, + concurrency: null, + gateType: null, + maxRework: null, + reviewers: [], + transitions: { onComplete: null, onApprove: null, onRework: null }, + linearState: null, + }, + }, + }; +} + +function createGateWorkflowConfig(): StagesConfig { + return { + initialStage: "implement", + fastTrack: null, + stages: { + implement: { + type: "agent", + runner: "claude-code", + model: "claude-sonnet-4-5", + prompt: "implement.liquid", + maxTurns: 30, + timeoutMs: null, + concurrency: null, + gateType: null, + maxRework: null, + reviewers: [], + transitions: 
{ + onComplete: "review", + onApprove: null, + onRework: null, + }, + linearState: null, + }, + review: { + type: "gate", + runner: null, + model: null, + prompt: null, + maxTurns: null, + timeoutMs: null, + concurrency: null, + gateType: "ensemble", + maxRework: 3, + reviewers: [], + transitions: { + onComplete: null, + onApprove: "merge", + onRework: "implement", + }, + linearState: null, + }, + merge: { + type: "agent", + runner: "claude-code", + model: "claude-sonnet-4-5", + prompt: "merge.liquid", + maxTurns: 5, + timeoutMs: null, + concurrency: null, + gateType: null, + maxRework: null, + reviewers: [], + transitions: { + onComplete: "done", + onApprove: null, + onRework: null, + }, + linearState: null, + }, + done: { + type: "terminal", + runner: null, + model: null, + prompt: null, + maxTurns: null, + timeoutMs: null, + concurrency: null, + gateType: null, + maxRework: null, + reviewers: [], + transitions: { onComplete: null, onApprove: null, onRework: null }, + linearState: null, + }, + }, + }; +} + +function createAgentReviewWorkflowConfig(): StagesConfig { + return { + initialStage: "implement", + fastTrack: null, + stages: { + implement: { + type: "agent", + runner: "claude-code", + model: "claude-sonnet-4-5", + prompt: "implement.liquid", + maxTurns: 30, + timeoutMs: null, + concurrency: null, + gateType: null, + maxRework: null, + reviewers: [], + transitions: { + onComplete: "review", + onApprove: null, + onRework: null, + }, + linearState: null, + }, + review: { + type: "agent", + runner: "claude-code", + model: "claude-opus-4-6", + prompt: "review.liquid", + maxTurns: 15, + timeoutMs: null, + concurrency: null, + gateType: null, + maxRework: 3, + reviewers: [], + transitions: { + onComplete: "merge", + onApprove: null, + onRework: "implement", + }, + linearState: null, + }, + merge: { + type: "agent", + runner: "claude-code", + model: "claude-sonnet-4-5", + prompt: "merge.liquid", + maxTurns: 5, + timeoutMs: null, + concurrency: null, + gateType: 
null, + maxRework: null, + reviewers: [], + transitions: { + onComplete: "done", + onApprove: null, + onRework: null, + }, + linearState: null, + }, + done: { + type: "terminal", + runner: null, + model: null, + prompt: null, + maxTurns: null, + timeoutMs: null, + concurrency: null, + gateType: null, + maxRework: null, + reviewers: [], + transitions: { onComplete: null, onApprove: null, onRework: null }, + linearState: null, + }, + }, + }; +} + +function createMergeWithRebaseWorkflowConfig(): StagesConfig { + return { + initialStage: "implement", + fastTrack: null, + stages: { + implement: { + type: "agent", + runner: "claude-code", + model: "claude-sonnet-4-5", + prompt: "implement.liquid", + maxTurns: 30, + timeoutMs: null, + concurrency: null, + gateType: null, + maxRework: null, + reviewers: [], + transitions: { + onComplete: "merge", + onApprove: null, + onRework: null, + }, + linearState: null, + }, + merge: { + type: "agent", + runner: "claude-code", + model: "claude-sonnet-4-5", + prompt: "merge.liquid", + maxTurns: 5, + timeoutMs: null, + concurrency: null, + gateType: null, + maxRework: 3, + reviewers: [], + transitions: { + onComplete: "done", + onApprove: null, + onRework: "implement", + }, + linearState: null, + }, + done: { + type: "terminal", + runner: null, + model: null, + prompt: null, + maxTurns: null, + timeoutMs: null, + concurrency: null, + gateType: null, + maxRework: null, + reviewers: [], + transitions: { onComplete: null, onApprove: null, onRework: null }, + linearState: null, + }, + }, + }; +} + +function createTracker(input?: { + candidates?: Issue[]; + candidatesFn?: () => Issue[]; +}): IssueTracker { + const getCandidates = () => + input?.candidatesFn?.() ?? + input?.candidates ?? 
[createIssue({ id: "1", identifier: "ISSUE-1" })]; + + return { + async fetchCandidateIssues() { + return getCandidates(); + }, + async fetchIssuesByStates() { + return []; + }, + async fetchIssueStatesByIds() { + const candidates = getCandidates(); + return candidates.map((issue) => ({ + id: issue.id, + identifier: issue.identifier, + state: issue.state, + })); + }, + }; +} + +function createConfig(overrides?: { + stages?: StagesConfig | null; + escalationState?: string | null; +}): ResolvedWorkflowConfig { + return { + workflowPath: "/tmp/WORKFLOW.md", + promptTemplate: "Prompt", + tracker: { + kind: "linear", + endpoint: "https://api.linear.app/graphql", + apiKey: "token", + projectSlug: "project", + activeStates: ["Todo", "In Progress", "In Review"], + terminalStates: ["Done", "Canceled"], + }, + polling: { + intervalMs: 30_000, + }, + workspace: { + root: "/tmp/workspaces", + }, + hooks: { + afterCreate: null, + beforeRun: null, + afterRun: null, + beforeRemove: null, + timeoutMs: 30_000, + }, + agent: { + maxConcurrentAgents: 2, + maxTurns: 5, + maxRetryBackoffMs: 300_000, + maxRetryAttempts: 5, + maxConcurrentAgentsByState: {}, + }, + runner: { + kind: "codex", + model: null, + }, + codex: { + command: "codex-app-server", + approvalPolicy: "never", + threadSandbox: null, + turnSandboxPolicy: null, + turnTimeoutMs: 300_000, + readTimeoutMs: 30_000, + stallTimeoutMs: 300_000, + }, + server: { + port: null, + slackNotifyChannel: null, + }, + observability: { + dashboardEnabled: true, + refreshMs: 1_000, + renderIntervalMs: 16, + }, + stages: overrides?.stages !== undefined ? overrides.stages : null, + escalationState: overrides?.escalationState ?? null, + }; +} + +function createIssue(overrides?: Partial<Issue>): Issue { + return { + id: overrides?.id ?? "1", + identifier: overrides?.identifier ?? "ISSUE-1", + title: overrides?.title ?? "Example issue", + description: overrides?.description ?? null, + priority: overrides?.priority ?? 
1, + state: overrides?.state ?? "In Progress", + branchName: overrides?.branchName ?? null, + url: overrides?.url ?? null, + labels: overrides?.labels ?? [], + blockedBy: overrides?.blockedBy ?? [], + createdAt: overrides?.createdAt ?? "2026-03-01T00:00:00.000Z", + updatedAt: overrides?.updatedAt ?? "2026-03-01T00:00:00.000Z", + }; +} diff --git a/tests/orchestrator/gate-handler.test.ts b/tests/orchestrator/gate-handler.test.ts new file mode 100644 index 00000000..87aeee47 --- /dev/null +++ b/tests/orchestrator/gate-handler.test.ts @@ -0,0 +1,1184 @@ +import { describe, expect, it, vi } from "vitest"; + +import type { AgentRunnerCodexClient } from "../../src/agent/runner.js"; +import type { CodexTurnResult } from "../../src/codex/app-server-client.js"; +import type { + ReviewerDefinition, + StageDefinition, +} from "../../src/config/types.js"; +import type { ExecutionHistory, Issue } from "../../src/domain/model.js"; +import { + type AggregateVerdict, + type CreateReviewerClient, + type EnsembleGateResult, + type PostComment, + RATE_LIMIT_PATTERNS, + type ReviewerResult, + aggregateVerdicts, + formatExecutionReport, + formatGateComment, + formatRebaseComment, + formatReviewFindingsComment, + parseReviewerOutput, + runEnsembleGate, +} from "../../src/orchestrator/gate-handler.js"; + +describe("aggregateVerdicts", () => { + it("returns pass for empty results", () => { + expect(aggregateVerdicts([])).toBe("pass"); + }); + + it("returns pass when all reviewers pass", () => { + const results = [ + createResult({ verdict: "pass" }), + createResult({ verdict: "pass" }), + ]; + expect(aggregateVerdicts(results)).toBe("pass"); + }); + + it("returns fail when any reviewer fails", () => { + const results = [ + createResult({ verdict: "pass" }), + createResult({ verdict: "fail" }), + ]; + expect(aggregateVerdicts(results)).toBe("fail"); + }); + + it("returns fail when all reviewers fail", () => { + const results = [ + createResult({ verdict: "fail" }), + createResult({ 
verdict: "fail" }), + ]; + expect(aggregateVerdicts(results)).toBe("fail"); + }); + + it("returns pass when one reviewer passes and another errors", () => { + const results = [ + createResult({ verdict: "pass" }), + createResult({ verdict: "error" }), + ]; + expect(aggregateVerdicts(results)).toBe("pass"); + }); + + it("returns fail when all reviewers error (no review occurred)", () => { + const results = [ + createResult({ verdict: "error" }), + createResult({ verdict: "error" }), + ]; + expect(aggregateVerdicts(results)).toBe("fail"); + }); + + it("returns fail when one reviewer fails and another errors", () => { + const results = [ + createResult({ verdict: "fail" }), + createResult({ verdict: "error" }), + ]; + expect(aggregateVerdicts(results)).toBe("fail"); + }); +}); + +describe("parseReviewerOutput", () => { + const reviewer: ReviewerDefinition = { + runner: "codex", + model: "gpt-5.3-codex", + role: "adversarial-reviewer", + prompt: null, + }; + + it("parses valid JSON verdict with feedback", () => { + const raw = [ + '{"role": "adversarial-reviewer", "model": "gpt-5.3-codex", "verdict": "pass"}', + "", + "Code looks good. 
No issues found.", + ].join("\n"); + + const result = parseReviewerOutput(reviewer, raw); + expect(result.verdict.verdict).toBe("pass"); + expect(result.verdict.role).toBe("adversarial-reviewer"); + expect(result.verdict.model).toBe("gpt-5.3-codex"); + expect(result.feedback).toContain("Code looks good"); + }); + + it("parses verdict embedded in code block", () => { + const raw = [ + "Here is my review:", + "```", + '{"role": "security-reviewer", "model": "gemini-3-pro", "verdict": "fail"}', + "```", + "Found SQL injection vulnerability in user input handling.", + ].join("\n"); + + const result = parseReviewerOutput(reviewer, raw); + expect(result.verdict.verdict).toBe("fail"); + expect(result.verdict.role).toBe("security-reviewer"); + expect(result.feedback).toContain("SQL injection"); + }); + + it("defaults to fail for empty output", () => { + const result = parseReviewerOutput(reviewer, ""); + expect(result.verdict.verdict).toBe("fail"); + expect(result.feedback).toContain("empty output"); + }); + + it("defaults to fail when no valid JSON found", () => { + const result = parseReviewerOutput(reviewer, "Some random feedback text"); + expect(result.verdict.verdict).toBe("fail"); + expect(result.feedback).toBe("Some random feedback text"); + }); + + it("uses reviewer defaults when JSON missing role/model", () => { + const raw = '{"verdict": "pass"}'; + const result = parseReviewerOutput(reviewer, raw); + expect(result.verdict.role).toBe("adversarial-reviewer"); + expect(result.verdict.model).toBe("gpt-5.3-codex"); + expect(result.verdict.verdict).toBe("pass"); + }); + + it("returns error verdict when output contains rate-limit text", () => { + const raw = + "You have exhausted your capacity on this model. 
Please try again later."; + const result = parseReviewerOutput(reviewer, raw); + expect(result.verdict.verdict).toBe("error"); + expect(result.verdict.role).toBe("adversarial-reviewer"); + expect(result.verdict.model).toBe("gpt-5.3-codex"); + expect(result.feedback).toContain("exhausted your capacity"); + }); + + it("returns error verdict for quota exceeded text (case-insensitive)", () => { + const raw = "Error: Quota Exceeded for this billing period."; + const result = parseReviewerOutput(reviewer, raw); + expect(result.verdict.verdict).toBe("error"); + }); + + it("still returns fail for genuine non-JSON review without rate-limit text", () => { + const raw = + "This code has serious issues but I cannot format my response as JSON."; + const result = parseReviewerOutput(reviewer, raw); + expect(result.verdict.verdict).toBe("fail"); + expect(result.feedback).toBe(raw); + }); +}); + +describe("formatGateComment", () => { + it("formats a passing gate comment", () => { + const results = [ + createResult({ verdict: "pass", role: "reviewer-1", feedback: "LGTM" }), + ]; + const comment = formatGateComment("pass", results); + expect(comment).toContain("Ensemble Review: PASS"); + expect(comment).toContain("reviewer-1"); + expect(comment).toContain("LGTM"); + }); + + it("formats a failing gate comment with multiple reviewers", () => { + const results = [ + createResult({ verdict: "pass", role: "reviewer-1", feedback: "OK" }), + createResult({ + verdict: "fail", + role: "security-reviewer", + feedback: "Found XSS vulnerability", + }), + ]; + const comment = formatGateComment("fail", results); + expect(comment).toContain("Ensemble Review: FAIL"); + expect(comment).toContain("reviewer-1"); + expect(comment).toContain("PASS"); + expect(comment).toContain("security-reviewer"); + expect(comment).toContain("FAIL"); + expect(comment).toContain("Found XSS vulnerability"); + }); +}); + +describe("formatReviewFindingsComment", () => { + it("starts with ## Review Findings header", () => 
{ + const comment = formatReviewFindingsComment( + "ISSUE-42", + "review", + "Some message", + ); + expect(comment.startsWith("## Review Findings")).toBe(true); + }); + + it("includes the stage name and issue identifier", () => { + const comment = formatReviewFindingsComment( + "ISSUE-42", + "review", + "Some message", + ); + expect(comment).toContain("review"); + expect(comment).toContain("ISSUE-42"); + }); + + it("includes the agent message when provided", () => { + const comment = formatReviewFindingsComment( + "ISSUE-1", + "review", + "Missing null check in handler.ts line 42", + ); + expect(comment).toContain("Missing null check in handler.ts line 42"); + }); + + it("omits the message body when agentMessage is empty", () => { + const comment = formatReviewFindingsComment("ISSUE-1", "review", ""); + expect(comment).toContain("## Review Findings"); + expect(comment).toContain("review"); + // Should not have extra blank lines from empty message + expect(comment.split("\n").filter(Boolean).length).toBeLessThan(5); + }); +}); + +describe("formatRebaseComment", () => { + it("starts with ## Rebase Needed header", () => { + const comment = formatRebaseComment("ISSUE-42", "merge", "Some message"); + expect(comment.startsWith("## Rebase Needed")).toBe(true); + }); + + it("includes the stage name and issue identifier", () => { + const comment = formatRebaseComment("ISSUE-42", "merge", "Some message"); + expect(comment).toContain("merge"); + expect(comment).toContain("ISSUE-42"); + }); + + it("includes the agent message when provided", () => { + const comment = formatRebaseComment( + "ISSUE-1", + "merge", + "Merge conflict in src/handler.ts", + ); + expect(comment).toContain("Merge conflict in src/handler.ts"); + }); + + it("omits the message body when agentMessage is empty", () => { + const comment = formatRebaseComment("ISSUE-1", "merge", ""); + expect(comment).toContain("## Rebase Needed"); + expect(comment).toContain("merge"); + 
expect(comment.split("\n").filter(Boolean).length).toBeLessThan(5); + }); +}); + +describe("runEnsembleGate", () => { + it("returns pass with empty comment when no reviewers configured", async () => { + const result = await runEnsembleGate({ + issue: createIssue(), + stage: createGateStage({ reviewers: [] }), + createReviewerClient: () => { + throw new Error("Should not be called"); + }, + }); + + expect(result.aggregate).toBe("pass"); + expect(result.results).toHaveLength(0); + expect(result.comment).toContain("No reviewers configured"); + }); + + it("spawns reviewers in parallel and aggregates pass verdicts", async () => { + const clientCalls: string[] = []; + const result = await runEnsembleGate({ + issue: createIssue(), + stage: createGateStage({ + reviewers: [ + { + runner: "codex", + model: "gpt-5.3-codex", + role: "adversarial-reviewer", + prompt: null, + }, + { + runner: "gemini", + model: "gemini-3-pro", + role: "security-reviewer", + prompt: null, + }, + ], + }), + createReviewerClient: (reviewer) => { + clientCalls.push(reviewer.role); + return createMockClient( + `{"role": "${reviewer.role}", "model": "${reviewer.model}", "verdict": "pass"}\n\nLooks good.`, + ); + }, + }); + + expect(clientCalls).toContain("adversarial-reviewer"); + expect(clientCalls).toContain("security-reviewer"); + expect(result.aggregate).toBe("pass"); + expect(result.results).toHaveLength(2); + expect(result.results.every((r) => r.verdict.verdict === "pass")).toBe( + true, + ); + }); + + it("aggregates to fail when one reviewer fails", async () => { + const result = await runEnsembleGate({ + issue: createIssue(), + stage: createGateStage({ + reviewers: [ + { + runner: "codex", + model: "gpt-5.3-codex", + role: "adversarial-reviewer", + prompt: null, + }, + { + runner: "gemini", + model: "gemini-3-pro", + role: "security-reviewer", + prompt: null, + }, + ], + }), + createReviewerClient: (reviewer) => { + if (reviewer.role === "security-reviewer") { + return createMockClient( + 
`{"role": "security-reviewer", "model": "gemini-3-pro", "verdict": "fail"}\n\nSQL injection found.`, + ); + } + return createMockClient( + `{"role": "adversarial-reviewer", "model": "gpt-5.3-codex", "verdict": "pass"}\n\nOK`, + ); + }, + }); + + expect(result.aggregate).toBe("fail"); + expect(result.results).toHaveLength(2); + }); + + it("treats reviewer infrastructure errors as error verdicts (not fail)", async () => { + const result = await runEnsembleGate({ + issue: createIssue(), + stage: createGateStage({ + reviewers: [ + { + runner: "codex", + model: "gpt-5.3-codex", + role: "adversarial-reviewer", + prompt: null, + }, + ], + }), + createReviewerClient: () => createErrorClient("Connection timeout"), + retryBaseDelayMs: 0, + }); + + // All reviewers errored → aggregate is fail (can't skip review) + expect(result.aggregate).toBe("fail"); + expect(result.results).toHaveLength(1); + expect(result.results[0]!.verdict.verdict).toBe("error"); + expect(result.results[0]!.feedback).toContain("Connection timeout"); + }); + + it("passes gate when one reviewer passes and another errors", async () => { + const result = await runEnsembleGate({ + issue: createIssue(), + stage: createGateStage({ + reviewers: [ + { + runner: "codex", + model: "gpt-5.3-codex", + role: "adversarial-reviewer", + prompt: null, + }, + { + runner: "gemini", + model: "gemini-2.5-pro", + role: "security-reviewer", + prompt: null, + }, + ], + }), + createReviewerClient: (reviewer) => { + if (reviewer.role === "security-reviewer") { + return createErrorClient("Rate limit exceeded"); + } + return createMockClient( + `{"role": "adversarial-reviewer", "model": "gpt-5.3-codex", "verdict": "pass"}\n\nLooks good.`, + ); + }, + retryBaseDelayMs: 0, + }); + + // One pass + one error = pass (error doesn't block) + expect(result.aggregate).toBe("pass"); + expect(result.results).toHaveLength(2); + }); + + it("posts aggregated comment to tracker", async () => { + const postedComments: Array<{ issueId: string; 
body: string }> = []; + const postComment: PostComment = async (issueId, body) => { + postedComments.push({ issueId, body }); + }; + + await runEnsembleGate({ + issue: createIssue({ id: "issue-42" }), + stage: createGateStage({ + reviewers: [ + { + runner: "codex", + model: "gpt-5.3-codex", + role: "reviewer", + prompt: null, + }, + ], + }), + createReviewerClient: () => + createMockClient( + '{"role": "reviewer", "model": "gpt-5.3-codex", "verdict": "pass"}\n\nLGTM', + ), + postComment, + }); + + expect(postedComments).toHaveLength(1); + expect(postedComments[0]!.issueId).toBe("issue-42"); + expect(postedComments[0]!.body).toContain("Ensemble Review: PASS"); + }); + + it("survives comment posting failure", async () => { + const postComment: PostComment = async () => { + throw new Error("Network error"); + }; + + const result = await runEnsembleGate({ + issue: createIssue(), + stage: createGateStage({ + reviewers: [ + { + runner: "codex", + model: "gpt-5.3-codex", + role: "reviewer", + prompt: null, + }, + ], + }), + createReviewerClient: () => + createMockClient( + '{"role": "reviewer", "model": "gpt-5.3-codex", "verdict": "pass"}\n\nOK', + ), + postComment, + }); + + // Should still succeed despite comment failure + expect(result.aggregate).toBe("pass"); + }); + + it("closes reviewer clients even on error", async () => { + const closeCalls: string[] = []; + const createClient: CreateReviewerClient = (reviewer) => ({ + startSession: async () => { + throw new Error("boom"); + }, + continueTurn: async () => { + throw new Error("not used"); + }, + close: async () => { + closeCalls.push(reviewer.role); + }, + }); + + await runEnsembleGate({ + issue: createIssue(), + stage: createGateStage({ + reviewers: [ + { + runner: "codex", + model: "m", + role: "r1", + prompt: null, + }, + { + runner: "gemini", + model: "m", + role: "r2", + prompt: null, + }, + ], + }), + createReviewerClient: createClient, + retryBaseDelayMs: 0, + }); + + // With retries, close is called once 
per attempt per reviewer + expect(closeCalls.filter((c) => c === "r1").length).toBeGreaterThanOrEqual( + 1, + ); + expect(closeCalls.filter((c) => c === "r2").length).toBeGreaterThanOrEqual( + 1, + ); + }); +}); + +describe("ensemble gate orchestrator integration", () => { + it("ensemble gate triggers approve and schedules continuation on pass", async () => { + const { OrchestratorCore } = await import("../../src/orchestrator/core.js"); + + const gateResults: EnsembleGateResult[] = []; + const orchestrator = new OrchestratorCore({ + config: createConfig({ + stages: createEnsembleWorkflowConfig(), + }), + tracker: createTracker(), + spawnWorker: async () => ({ + workerHandle: { pid: 1 }, + monitorHandle: { ref: "m" }, + }), + runEnsembleGate: async ({ issue, stage }) => { + const result: EnsembleGateResult = { + aggregate: "pass", + results: [], + comment: "All clear", + }; + gateResults.push(result); + return result; + }, + now: () => new Date("2026-03-06T00:00:05.000Z"), + }); + + // Dispatch issue into "implement" (agent stage) + await orchestrator.pollTick(); + expect(orchestrator.getState().issueStages["1"]).toBe("implement"); + + // Normal exit advances to "review" (ensemble gate) + orchestrator.onWorkerExit({ issueId: "1", outcome: "normal" }); + expect(orchestrator.getState().issueStages["1"]).toBe("review"); + + // Retry timer dispatches gate — ensemble handler runs + await orchestrator.onRetryTimer("1"); + + // Wait for async gate handler to complete + await vi.waitFor(() => { + expect(gateResults).toHaveLength(1); + }); + + // Gate passed → approveGate called → issue should advance to "merge" + await vi.waitFor(() => { + expect(orchestrator.getState().issueStages["1"]).toBe("merge"); + }); + }); + + it("ensemble gate triggers rework on fail", async () => { + const { OrchestratorCore } = await import("../../src/orchestrator/core.js"); + + const orchestrator = new OrchestratorCore({ + config: createConfig({ + stages: createEnsembleWorkflowConfig(), + }), + 
tracker: createTracker(), + spawnWorker: async () => ({ + workerHandle: { pid: 1 }, + monitorHandle: { ref: "m" }, + }), + runEnsembleGate: async () => ({ + aggregate: "fail" as const, + results: [], + comment: "Review failed", + }), + now: () => new Date("2026-03-06T00:00:05.000Z"), + }); + + await orchestrator.pollTick(); + orchestrator.onWorkerExit({ issueId: "1", outcome: "normal" }); + expect(orchestrator.getState().issueStages["1"]).toBe("review"); + + await orchestrator.onRetryTimer("1"); + + await vi.waitFor(() => { + // Gate failed → reworkGate called → issue should go back to "implement" + expect(orchestrator.getState().issueStages["1"]).toBe("implement"); + }); + + expect(orchestrator.getState().issueReworkCounts["1"]).toBe(1); + }); + + it("posts escalation comment when rework max exceeded", async () => { + const { OrchestratorCore } = await import("../../src/orchestrator/core.js"); + + const postedComments: Array<{ issueId: string; body: string }> = []; + const orchestrator = new OrchestratorCore({ + config: createConfig({ + stages: createEnsembleWorkflowConfig(), + }), + tracker: createTracker(), + spawnWorker: async () => ({ + workerHandle: { pid: 1 }, + monitorHandle: { ref: "m" }, + }), + runEnsembleGate: async () => ({ + aggregate: "fail" as const, + results: [], + comment: "Review failed", + }), + postComment: async (issueId, body) => { + postedComments.push({ issueId, body }); + }, + now: () => new Date("2026-03-06T00:00:05.000Z"), + }); + + // Dispatch → implement stage + await orchestrator.pollTick(); + expect(orchestrator.getState().issueStages["1"]).toBe("implement"); + + // Exhaust max_rework (3) by cycling through rework loops + for (let i = 0; i < 3; i++) { + orchestrator.onWorkerExit({ issueId: "1", outcome: "normal" }); + expect(orchestrator.getState().issueStages["1"]).toBe("review"); + + await orchestrator.onRetryTimer("1"); + + // Wait for gate to rework back to implement + await vi.waitFor(() => { + 
expect(orchestrator.getState().issueStages["1"]).toBe("implement"); + }); + + // Retry to re-dispatch the implement stage + await orchestrator.onRetryTimer("1"); + } + + // 4th cycle — this should trigger escalation + orchestrator.onWorkerExit({ issueId: "1", outcome: "normal" }); + expect(orchestrator.getState().issueStages["1"]).toBe("review"); + + await orchestrator.onRetryTimer("1"); + + // Wait for escalation + await vi.waitFor(() => { + expect(orchestrator.getState().issueStages["1"]).toBeUndefined(); + }); + + expect(orchestrator.getState().failed.has("1")).toBe(true); + expect(postedComments).toHaveLength(1); + expect(postedComments[0]!.issueId).toBe("1"); + expect(postedComments[0]!.body).toContain( + "max rework attempts (3) exceeded", + ); + expect(postedComments[0]!.body).toContain("Escalating for manual review"); + }); + + it("human gate leaves issue in gate state without running handler", async () => { + const { OrchestratorCore } = await import("../../src/orchestrator/core.js"); + + const gateHandlerCalled = vi.fn(); + const orchestrator = new OrchestratorCore({ + config: createConfig({ + stages: createHumanGateWorkflowConfig(), + }), + tracker: createTracker(), + spawnWorker: async () => ({ + workerHandle: { pid: 1 }, + monitorHandle: { ref: "m" }, + }), + runEnsembleGate: async () => { + gateHandlerCalled(); + return { aggregate: "pass" as const, results: [], comment: "" }; + }, + now: () => new Date("2026-03-06T00:00:05.000Z"), + }); + + await orchestrator.pollTick(); + orchestrator.onWorkerExit({ issueId: "1", outcome: "normal" }); + expect(orchestrator.getState().issueStages["1"]).toBe("review"); + + // Retry timer — human gate should not run ensemble handler + await orchestrator.onRetryTimer("1"); + + // Give it a moment to ensure nothing fires + await new Promise((r) => setTimeout(r, 50)); + + expect(gateHandlerCalled).not.toHaveBeenCalled(); + // Issue stays in review (gate state) + 
expect(orchestrator.getState().issueStages["1"]).toBe("review"); + }); +}); + +describe("config resolver parses reviewers", () => { + it("parses reviewers from stage config", async () => { + const { resolveStagesConfig } = await import( + "../../src/config/config-resolver.js" + ); + + const result = resolveStagesConfig({ + review: { + type: "gate", + gate_type: "ensemble", + on_approve: "done", + on_rework: "implement", + max_rework: 3, + reviewers: [ + { + runner: "codex", + model: "gpt-5.3-codex", + role: "adversarial-reviewer", + prompt: "review-adversarial.liquid", + }, + { + runner: "gemini", + model: "gemini-3-pro", + role: "security-reviewer", + prompt: "review-security.liquid", + }, + ], + }, + implement: { + type: "agent", + on_complete: "review", + }, + done: { + type: "terminal", + }, + }); + + expect(result).not.toBeNull(); + const review = result!.stages.review!; + expect(review.reviewers).toHaveLength(2); + expect(review.reviewers[0]!.runner).toBe("codex"); + expect(review.reviewers[0]!.role).toBe("adversarial-reviewer"); + expect(review.reviewers[0]!.prompt).toBe("review-adversarial.liquid"); + expect(review.reviewers[1]!.runner).toBe("gemini"); + expect(review.reviewers[1]!.role).toBe("security-reviewer"); + }); + + it("returns empty reviewers when not specified", async () => { + const { resolveStagesConfig } = await import( + "../../src/config/config-resolver.js" + ); + + const result = resolveStagesConfig({ + review: { + type: "gate", + gate_type: "ensemble", + on_approve: "done", + }, + done: { + type: "terminal", + }, + }); + + expect(result!.stages.review!.reviewers).toEqual([]); + }); + + it("skips reviewers missing required runner or role", async () => { + const { resolveStagesConfig } = await import( + "../../src/config/config-resolver.js" + ); + + const result = resolveStagesConfig({ + review: { + type: "gate", + gate_type: "ensemble", + on_approve: "done", + reviewers: [ + { runner: "codex", role: "valid-reviewer" }, + { runner: "gemini" 
}, // missing role + { role: "another-reviewer" }, // missing runner + { model: "m" }, // missing both + ], + }, + done: { + type: "terminal", + }, + }); + + expect(result!.stages.review!.reviewers).toHaveLength(1); + expect(result!.stages.review!.reviewers[0]!.role).toBe("valid-reviewer"); + }); +}); + +// --- Test Helpers --- + +function createResult(overrides?: { + verdict?: "pass" | "fail" | "error"; + role?: string; + feedback?: string; +}): ReviewerResult { + const verdict = overrides?.verdict ?? "pass"; + const role = overrides?.role ?? "test-reviewer"; + return { + reviewer: { + runner: "codex", + model: "test-model", + role, + prompt: null, + }, + verdict: { + role, + model: "test-model", + verdict, + }, + feedback: overrides?.feedback ?? "No issues found.", + raw: "", + }; +} + +function createMockClient(message: string): AgentRunnerCodexClient { + return { + startSession: async () => createTurnResult(message), + continueTurn: async () => createTurnResult(message), + close: async () => {}, + }; +} + +function createErrorClient(errorMessage: string): AgentRunnerCodexClient { + return { + startSession: async () => { + throw new Error(errorMessage); + }, + continueTurn: async () => { + throw new Error(errorMessage); + }, + close: async () => {}, + }; +} + +function createTurnResult(message: string): CodexTurnResult { + return { + status: "completed", + threadId: "thread-1", + turnId: "turn-1", + sessionId: "session-1", + usage: null, + rateLimits: null, + message, + }; +} + +function createIssue(overrides?: Partial<Issue>): Issue { + return { + id: overrides?.id ?? "1", + identifier: overrides?.identifier ?? "ISSUE-1", + title: overrides?.title ?? "Example issue", + description: overrides?.description ?? "Fix the bug in user auth", + priority: overrides?.priority ?? 1, + state: overrides?.state ?? "In Progress", + branchName: overrides?.branchName ?? null, + url: overrides?.url ?? "https://linear.app/project/issue/ISSUE-1", + labels: overrides?.labels ?? 
[], + blockedBy: overrides?.blockedBy ?? [], + createdAt: overrides?.createdAt ?? "2026-03-01T00:00:00.000Z", + updatedAt: overrides?.updatedAt ?? "2026-03-01T00:00:00.000Z", + }; +} + +function createGateStage(overrides?: { + reviewers?: ReviewerDefinition[]; +}): StageDefinition { + return { + type: "gate", + runner: null, + model: null, + prompt: null, + maxTurns: null, + timeoutMs: null, + concurrency: null, + gateType: "ensemble", + maxRework: 3, + reviewers: overrides?.reviewers ?? [], + transitions: { + onComplete: null, + onApprove: "merge", + onRework: "implement", + }, + linearState: null, + }; +} + +function createEnsembleWorkflowConfig() { + return { + initialStage: "implement", + fastTrack: null, + stages: { + implement: { + type: "agent" as const, + runner: "claude-code", + model: "claude-sonnet-4-5", + prompt: "implement.liquid", + maxTurns: 30, + timeoutMs: null, + concurrency: null, + gateType: null, + maxRework: null, + reviewers: [], + transitions: { + onComplete: "review", + onApprove: null, + onRework: null, + }, + linearState: null, + }, + review: { + type: "gate" as const, + runner: null, + model: null, + prompt: null, + maxTurns: null, + timeoutMs: null, + concurrency: null, + gateType: "ensemble" as const, + maxRework: 3, + reviewers: [ + { + runner: "codex", + model: "gpt-5.3-codex", + role: "adversarial-reviewer", + prompt: null, + }, + ], + transitions: { + onComplete: null, + onApprove: "merge", + onRework: "implement", + }, + linearState: null, + }, + merge: { + type: "agent" as const, + runner: "claude-code", + model: "claude-sonnet-4-5", + prompt: "merge.liquid", + maxTurns: 5, + timeoutMs: null, + concurrency: null, + gateType: null, + maxRework: null, + reviewers: [], + transitions: { + onComplete: "done", + onApprove: null, + onRework: null, + }, + linearState: null, + }, + done: { + type: "terminal" as const, + runner: null, + model: null, + prompt: null, + maxTurns: null, + timeoutMs: null, + concurrency: null, + gateType: null, 
+ maxRework: null, + reviewers: [], + transitions: { onComplete: null, onApprove: null, onRework: null }, + linearState: null, + }, + }, + }; +} + +function createHumanGateWorkflowConfig() { + const config = createEnsembleWorkflowConfig(); + return { + ...config, + stages: { + ...config.stages, + review: { + ...config.stages.review, + gateType: "human" as const, + reviewers: [], + }, + }, + }; +} + +function createTracker() { + const issue = createIssue(); + return { + async fetchCandidateIssues() { + return [issue]; + }, + async fetchIssuesByStates() { + return []; + }, + async fetchIssueStatesByIds() { + return [ + { id: issue.id, identifier: issue.identifier, state: issue.state }, + ]; + }, + }; +} + +function createConfig(overrides?: { + stages?: + | ReturnType<typeof createEnsembleWorkflowConfig> + | ReturnType<typeof createHumanGateWorkflowConfig> + | null; +}) { + return { + workflowPath: "/tmp/WORKFLOW.md", + promptTemplate: "Prompt", + tracker: { + kind: "linear", + endpoint: "https://api.linear.app/graphql", + apiKey: "token", + projectSlug: "project", + activeStates: ["Todo", "In Progress", "In Review"], + terminalStates: ["Done", "Canceled"], + }, + polling: { intervalMs: 30_000 }, + workspace: { root: "/tmp/workspaces" }, + hooks: { + afterCreate: null, + beforeRun: null, + afterRun: null, + beforeRemove: null, + timeoutMs: 30_000, + }, + agent: { + maxConcurrentAgents: 2, + maxTurns: 5, + maxRetryBackoffMs: 300_000, + maxRetryAttempts: 5, + maxConcurrentAgentsByState: {}, + }, + runner: { kind: "codex", model: null }, + codex: { + command: "codex-app-server", + approvalPolicy: "never", + threadSandbox: null, + turnSandboxPolicy: null, + turnTimeoutMs: 300_000, + readTimeoutMs: 30_000, + stallTimeoutMs: 300_000, + }, + server: { port: null, slackNotifyChannel: null }, + observability: { + dashboardEnabled: true, + refreshMs: 1_000, + renderIntervalMs: 16, + }, + stages: overrides?.stages ?? 
null, + escalationState: null, + }; +} + +describe("formatExecutionReport", () => { + it("starts with ## Execution Report header", () => { + const history: ExecutionHistory = []; + const report = formatExecutionReport("SYMPH-1", history); + expect(report).toMatch(/^## Execution Report/); + }); + + it("includes issue identifier", () => { + const history: ExecutionHistory = []; + const report = formatExecutionReport("SYMPH-42", history); + expect(report).toContain("SYMPH-42"); + }); + + it("contains stage timeline table with correct columns", () => { + const history: ExecutionHistory = [ + { + stageName: "investigate", + durationMs: 18_000, + totalTokens: 50_000, + turns: 5, + outcome: "normal", + }, + ]; + const report = formatExecutionReport("SYMPH-1", history); + expect(report).toContain("| Stage |"); + expect(report).toContain("| Duration |"); + expect(report).toContain("| Tokens |"); + expect(report).toContain("| Turns |"); + expect(report).toContain("| Outcome |"); + }); + + it("includes each stage record in the table", () => { + const history: ExecutionHistory = [ + { + stageName: "investigate", + durationMs: 18_000, + totalTokens: 50_000, + turns: 5, + outcome: "normal", + }, + { + stageName: "implement", + durationMs: 120_000, + totalTokens: 200_000, + turns: 10, + outcome: "normal", + }, + ]; + const report = formatExecutionReport("SYMPH-1", history); + expect(report).toContain("investigate"); + expect(report).toContain("18s"); + expect(report).toContain("implement"); + expect(report).toContain("120s"); + expect(report).toContain("normal"); + }); + + it("includes total tokens across all stages", () => { + const history: ExecutionHistory = [ + { + stageName: "investigate", + durationMs: 18_000, + totalTokens: 50_000, + turns: 5, + outcome: "normal", + }, + { + stageName: "implement", + durationMs: 120_000, + totalTokens: 200_000, + turns: 10, + outcome: "normal", + }, + { + stageName: "review", + durationMs: 45_000, + totalTokens: 80_000, + turns: 3, + 
outcome: "normal", + }, + { + stageName: "merge", + durationMs: 10_000, + totalTokens: 20_000, + turns: 2, + outcome: "normal", + }, + ]; + const report = formatExecutionReport("SYMPH-1", history); + // Total = 50000 + 200000 + 80000 + 20000 = 350000 + expect(report).toContain("350,000"); + expect(report).toContain("Total tokens"); + }); + + it("includes rework count when provided and non-zero", () => { + const history: ExecutionHistory = [ + { + stageName: "implement", + durationMs: 60_000, + totalTokens: 100_000, + turns: 8, + outcome: "normal", + }, + ]; + const report = formatExecutionReport("SYMPH-1", history, 1); + expect(report).toContain("Rework count"); + expect(report).toContain("1"); + }); + + it("omits rework count line when rework count is zero", () => { + const history: ExecutionHistory = []; + const report = formatExecutionReport("SYMPH-1", history, 0); + expect(report).not.toContain("Rework count"); + }); + + it("omits rework count line when not provided", () => { + const history: ExecutionHistory = []; + const report = formatExecutionReport("SYMPH-1", history); + expect(report).not.toContain("Rework count"); + }); + + it("handles empty history with total tokens of zero", () => { + const history: ExecutionHistory = []; + const report = formatExecutionReport("SYMPH-1", history); + expect(report).toContain("Total tokens"); + expect(report).toContain("0"); + }); + + it("version footer is present at end of execution report", () => { + const history: ExecutionHistory = []; + const report = formatExecutionReport("SYMPH-1", history); + expect(report).toMatch(/symphony-ts v.+$/); + }); +}); diff --git a/tests/orchestrator/pipeline-notifier.test.ts b/tests/orchestrator/pipeline-notifier.test.ts new file mode 100644 index 00000000..2b1cb1b1 --- /dev/null +++ b/tests/orchestrator/pipeline-notifier.test.ts @@ -0,0 +1,358 @@ +import { describe, expect, it, vi } from "vitest"; + +import { + PipelineNotifier, + formatDurationMs, + formatNotification, + 
formatStageTimeline, +} from "../../src/orchestrator/pipeline-notifier.js"; +import type { + NotificationPoster, + PipelineNotificationEvent, +} from "../../src/orchestrator/pipeline-notifier.js"; + +describe("formatDurationMs", () => { + it("formats seconds only", () => { + expect(formatDurationMs(45_000)).toBe("45s"); + }); + + it("formats minutes and seconds", () => { + expect(formatDurationMs(125_000)).toBe("2m 5s"); + }); + + it("formats exact minutes without seconds", () => { + expect(formatDurationMs(180_000)).toBe("3m"); + }); + + it("formats hours and minutes", () => { + expect(formatDurationMs(3_720_000)).toBe("1h 2m"); + }); + + it("formats exact hours without minutes", () => { + expect(formatDurationMs(7_200_000)).toBe("2h"); + }); + + it("rounds sub-second durations to zero", () => { + expect(formatDurationMs(499)).toBe("0s"); + }); +}); + +describe("formatStageTimeline", () => { + it("returns placeholder for empty history", () => { + expect(formatStageTimeline([])).toBe("_No stage data_"); + }); + + it("formats a single stage record", () => { + const result = formatStageTimeline([ + { + stageName: "investigate", + durationMs: 90_000, + totalTokens: 12345, + turns: 3, + outcome: "completed", + }, + ]); + expect(result).toContain("investigate"); + expect(result).toContain("1m 30s"); + expect(result).toContain("12,345 tokens"); + expect(result).toContain("completed"); + }); + + it("formats multiple stages on separate lines", () => { + const result = formatStageTimeline([ + { + stageName: "investigate", + durationMs: 60_000, + totalTokens: 5000, + turns: 2, + outcome: "completed", + }, + { + stageName: "implement", + durationMs: 120_000, + totalTokens: 15000, + turns: 5, + outcome: "completed", + }, + ]); + const lines = result.split("\n"); + expect(lines).toHaveLength(2); + expect(lines[0]).toContain("investigate"); + expect(lines[1]).toContain("implement"); + }); +}); + +describe("formatNotification", () => { + it("formats pipeline_started", () => { + 
const text = formatNotification({ + type: "pipeline_started", + productName: "symphony", + dashboardUrl: "http://localhost:3000", + }); + expect(text).toContain("Pipeline started"); + expect(text).toContain("symphony"); + expect(text).toContain("http://localhost:3000"); + }); + + it("formats pipeline_started without dashboard url", () => { + const text = formatNotification({ + type: "pipeline_started", + productName: "symphony", + dashboardUrl: null, + }); + expect(text).toContain("Pipeline started"); + expect(text).not.toContain("Dashboard"); + }); + + it("formats pipeline_stopped", () => { + const text = formatNotification({ + type: "pipeline_stopped", + productName: "symphony", + completedCount: 5, + failedCount: 2, + durationMs: 3_600_000, + }); + expect(text).toContain("Pipeline stopped"); + expect(text).toContain("Completed: 5"); + expect(text).toContain("Failed: 2"); + expect(text).toContain("Total: 7"); + expect(text).toContain("1h"); + }); + + it("formats issue_completed", () => { + const text = formatNotification({ + type: "issue_completed", + issueIdentifier: "SYMPH-42", + issueTitle: "Add pagination", + issueUrl: "https://linear.app/mobilyze-llc/issue/SYMPH-42", + executionHistory: [ + { + stageName: "investigate", + durationMs: 60_000, + totalTokens: 5000, + turns: 2, + outcome: "completed", + }, + { + stageName: "implement", + durationMs: 120_000, + totalTokens: 15000, + turns: 5, + outcome: "completed", + }, + ], + reworkCount: 1, + totalTokens: 20000, + totalDurationMs: 180_000, + }); + expect(text).toContain("Issue completed"); + expect(text).toContain("SYMPH-42"); + expect(text).toContain("Add pagination"); + expect(text).toContain("investigate"); + expect(text).toContain("implement"); + expect(text).toContain("20,000 tokens"); + expect(text).toContain("Rework cycles: 1"); + }); + + it("formats issue_completed without rework", () => { + const text = formatNotification({ + type: "issue_completed", + issueIdentifier: "SYMPH-42", + issueTitle: "Add 
pagination", + issueUrl: null, + executionHistory: [], + reworkCount: 0, + totalTokens: 10000, + totalDurationMs: 60_000, + }); + expect(text).not.toContain("Rework"); + }); + + it("formats issue_failed", () => { + const text = formatNotification({ + type: "issue_failed", + issueIdentifier: "SYMPH-42", + issueTitle: "Add pagination", + issueUrl: "https://linear.app/mobilyze-llc/issue/SYMPH-42", + failureReason: "Max retries exceeded", + retriesExhausted: true, + retryAttempt: 3, + }); + expect(text).toContain("Issue failed"); + expect(text).toContain("SYMPH-42"); + expect(text).toContain("Max retries exceeded"); + expect(text).toContain("Retries exhausted (attempt 3)"); + }); + + it("formats issue_failed without exhaustion", () => { + const text = formatNotification({ + type: "issue_failed", + issueIdentifier: "SYMPH-42", + issueTitle: "Fix bug", + issueUrl: null, + failureReason: "worker failed", + retriesExhausted: false, + retryAttempt: null, + }); + expect(text).toContain("Issue failed"); + expect(text).not.toContain("Retries exhausted"); + }); + + it("formats stall_killed", () => { + const text = formatNotification({ + type: "stall_killed", + issueIdentifier: "SYMPH-42", + issueTitle: "Add pagination", + stageName: "implement", + stallDurationMs: 900_000, + }); + expect(text).toContain("Stall killed"); + expect(text).toContain("SYMPH-42"); + expect(text).toContain("Stage: implement"); + expect(text).toContain("15m"); + }); + + it("formats stall_killed without stage name", () => { + const text = formatNotification({ + type: "stall_killed", + issueIdentifier: "SYMPH-42", + issueTitle: "Fix bug", + stageName: null, + stallDurationMs: 300_000, + }); + expect(text).not.toContain("Stage:"); + }); + + it("formats infra_error", () => { + const text = formatNotification({ + type: "infra_error", + issueIdentifier: "SYMPH-42", + issueTitle: "Add pagination", + errorReason: "Failed to start agent process", + }); + expect(text).toContain("Infra error"); + 
expect(text).toContain("SYMPH-42"); + expect(text).toContain("Failed to start agent process"); + }); +}); + +describe("PipelineNotifier", () => { + function createMockPoster(): NotificationPoster & { + calls: Array<{ channel: string; text: string }>; + } { + const calls: Array<{ channel: string; text: string }> = []; + return { + calls, + async post(channel: string, text: string): Promise<void> { + calls.push({ channel, text }); + }, + }; + } + + it("posts formatted notification to configured channel", async () => { + const poster = createMockPoster(); + const notifier = new PipelineNotifier({ + channel: "C12345", + poster, + }); + + notifier.notify({ + type: "pipeline_started", + productName: "symphony", + dashboardUrl: null, + }); + + // Wait for the async post + await new Promise((resolve) => setTimeout(resolve, 10)); + + expect(poster.calls).toHaveLength(1); + expect(poster.calls[0]?.channel).toBe("C12345"); + expect(poster.calls[0]?.text).toContain("Pipeline started"); + }); + + it("swallows errors and calls onError callback", async () => { + const errors: unknown[] = []; + const failingPoster: NotificationPoster = { + async post(): Promise<void> { + throw new Error("Slack API down"); + }, + }; + const notifier = new PipelineNotifier({ + channel: "C12345", + poster: failingPoster, + onError: (err) => errors.push(err), + }); + + notifier.notify({ + type: "pipeline_started", + productName: "symphony", + dashboardUrl: null, + }); + + // Wait for the async rejection + await new Promise((resolve) => setTimeout(resolve, 10)); + + expect(errors).toHaveLength(1); + expect(errors[0]).toBeInstanceOf(Error); + expect((errors[0] as Error).message).toBe("Slack API down"); + }); + + it("swallows errors silently when no onError callback provided", async () => { + const failingPoster: NotificationPoster = { + async post(): Promise<void> { + throw new Error("Slack API down"); + }, + }; + const notifier = new PipelineNotifier({ + channel: "C12345", + poster: failingPoster, + 
}); + + // Should not throw + notifier.notify({ + type: "pipeline_started", + productName: "symphony", + dashboardUrl: null, + }); + + await new Promise((resolve) => setTimeout(resolve, 10)); + }); + + it("sends multiple events to the same channel", async () => { + const poster = createMockPoster(); + const notifier = new PipelineNotifier({ + channel: "C12345", + poster, + }); + + const events: PipelineNotificationEvent[] = [ + { type: "pipeline_started", productName: "test", dashboardUrl: null }, + { + type: "issue_completed", + issueIdentifier: "TEST-1", + issueTitle: "Test", + issueUrl: null, + executionHistory: [], + reworkCount: 0, + totalTokens: 100, + totalDurationMs: 1000, + }, + { + type: "pipeline_stopped", + productName: "test", + completedCount: 1, + failedCount: 0, + durationMs: 5000, + }, + ]; + + for (const event of events) { + notifier.notify(event); + } + + await new Promise((resolve) => setTimeout(resolve, 50)); + + expect(poster.calls).toHaveLength(3); + expect(poster.calls.every((c) => c.channel === "C12345")).toBe(true); + }); +}); diff --git a/tests/orchestrator/retry-delay-type.test.ts b/tests/orchestrator/retry-delay-type.test.ts new file mode 100644 index 00000000..ad2cc09f --- /dev/null +++ b/tests/orchestrator/retry-delay-type.test.ts @@ -0,0 +1,423 @@ +import { describe, expect, it } from "vitest"; + +import type { ResolvedWorkflowConfig } from "../../src/config/types.js"; +import type { Issue } from "../../src/domain/model.js"; +import { OrchestratorCore } from "../../src/orchestrator/core.js"; +import type { IssueTracker } from "../../src/tracker/tracker.js"; + +describe("onRetryTimer preserves delayType from retry entry", () => { + it("preserves continuation delayType when tracker fetch fails", async () => { + let fetchCallCount = 0; + const tracker: IssueTracker = { + async fetchCandidateIssues() { + fetchCallCount++; + // First call succeeds (pollTick dispatch), subsequent calls fail + if (fetchCallCount <= 1) { + return 
[createIssue({ id: "1", identifier: "ISSUE-1" })]; + } + throw new Error("tracker API outage"); + }, + async fetchIssuesByStates() { + return []; + }, + async fetchIssueStatesByIds() { + return [{ id: "1", identifier: "ISSUE-1", state: "In Progress" }]; + }, + }; + + const timers = createFakeTimerScheduler(); + const orchestrator = new OrchestratorCore({ + config: createConfig({ agent: { maxRetryAttempts: 2 } }), + tracker, + spawnWorker: async () => ({ + workerHandle: { pid: 1001 }, + monitorHandle: { ref: "monitor-1" }, + }), + timerScheduler: timers, + now: () => new Date("2026-03-06T00:00:05.000Z"), + }); + + // Dispatch via pollTick + await orchestrator.pollTick(); + + // Normal exit -> continuation retry (attempt=1, delayType="continuation") + const retryEntry = orchestrator.onWorkerExit({ + issueId: "1", + outcome: "normal", + endedAt: new Date("2026-03-06T00:00:05.000Z"), + }); + + expect(retryEntry).not.toBeNull(); + expect(retryEntry).toMatchObject({ + issueId: "1", + attempt: 1, + delayType: "continuation", + }); + + // Fire retry timer — tracker fetch will fail + const result = await orchestrator.onRetryTimer("1"); + + expect(result.dispatched).toBe(false); + expect(result.released).toBe(false); + // The rescheduled retry must preserve delayType: "continuation" + expect(result.retryEntry).not.toBeNull(); + expect(result.retryEntry).toMatchObject({ + issueId: "1", + attempt: 2, + error: "retry poll failed", + delayType: "continuation", + }); + + // Continuation retries should NOT count against maxRetryAttempts. + // The issue is in the completed set because onWorkerExit adds it there + // before scheduling a continuation retry (this is normal — completed + // issues can be resumed via the "Resume"/"Todo" state check). + // The key assertion is that claimed is still true (not released/escalated). 
+ expect(orchestrator.getState().claimed.has("1")).toBe(true); + }); + + it("preserves failure delayType when tracker fetch fails", async () => { + let fetchCallCount = 0; + const tracker: IssueTracker = { + async fetchCandidateIssues() { + fetchCallCount++; + if (fetchCallCount <= 1) { + return [createIssue({ id: "1", identifier: "ISSUE-1" })]; + } + throw new Error("tracker API outage"); + }, + async fetchIssuesByStates() { + return []; + }, + async fetchIssueStatesByIds() { + return [{ id: "1", identifier: "ISSUE-1", state: "In Progress" }]; + }, + }; + + const timers = createFakeTimerScheduler(); + const orchestrator = new OrchestratorCore({ + config: createConfig({ agent: { maxRetryAttempts: 5 } }), + tracker, + spawnWorker: async () => ({ + workerHandle: { pid: 1001 }, + monitorHandle: { ref: "monitor-1" }, + }), + timerScheduler: timers, + now: () => new Date("2026-03-06T00:00:05.000Z"), + }); + + await orchestrator.pollTick(); + + // Abnormal exit -> failure retry (attempt=1, delayType="failure") + const retryEntry = orchestrator.onWorkerExit({ + issueId: "1", + outcome: "abnormal", + reason: "turn failed", + }); + + expect(retryEntry).not.toBeNull(); + expect(retryEntry).toMatchObject({ + issueId: "1", + attempt: 1, + delayType: "failure", + }); + + // Fire retry timer — tracker fetch will fail + const result = await orchestrator.onRetryTimer("1"); + + expect(result.dispatched).toBe(false); + expect(result.released).toBe(false); + expect(result.retryEntry).not.toBeNull(); + expect(result.retryEntry).toMatchObject({ + issueId: "1", + attempt: 2, + error: "retry poll failed", + delayType: "failure", + }); + }); + + it("preserves continuation delayType when no orchestrator slots available", async () => { + const timers = createFakeTimerScheduler(); + const tracker: IssueTracker = { + async fetchCandidateIssues() { + return [createIssue({ id: "1", identifier: "ISSUE-1" })]; + }, + async fetchIssuesByStates() { + return []; + }, + async fetchIssueStatesByIds() 
{ + return [{ id: "1", identifier: "ISSUE-1", state: "In Progress" }]; + }, + }; + + const orchestrator = new OrchestratorCore({ + config: createConfig({ + agent: { maxConcurrentAgents: 0, maxRetryAttempts: 2 }, + }), + tracker, + spawnWorker: async () => ({ + workerHandle: { pid: 1001 }, + monitorHandle: { ref: "monitor-1" }, + }), + timerScheduler: timers, + now: () => new Date("2026-03-06T00:00:05.000Z"), + }); + + // Manually create a continuation retry entry + orchestrator.getState().claimed.add("1"); + orchestrator.getState().retryAttempts["1"] = { + issueId: "1", + identifier: "ISSUE-1", + attempt: 1, + dueAtMs: Date.parse("2026-03-06T00:00:00.000Z"), + timerHandle: null, + error: null, + delayType: "continuation", + }; + + // Fire retry timer — no slots available + const result = await orchestrator.onRetryTimer("1"); + + expect(result.dispatched).toBe(false); + expect(result.released).toBe(false); + expect(result.retryEntry).not.toBeNull(); + expect(result.retryEntry).toMatchObject({ + issueId: "1", + attempt: 2, + error: "no available orchestrator slots", + delayType: "continuation", + }); + + // Continuation retries should NOT trigger escalation + expect(orchestrator.getState().completed.has("1")).toBe(false); + expect(orchestrator.getState().claimed.has("1")).toBe(true); + }); + + it("continuation retry that hits repeated tracker failures does NOT escalate at maxRetryAttempts", async () => { + const escalationComments: Array<{ issueId: string; body: string }> = []; + let fetchCallCount = 0; + const tracker: IssueTracker = { + async fetchCandidateIssues() { + fetchCallCount++; + if (fetchCallCount <= 1) { + return [createIssue({ id: "1", identifier: "ISSUE-1" })]; + } + throw new Error("tracker API outage"); + }, + async fetchIssuesByStates() { + return []; + }, + async fetchIssueStatesByIds() { + return [{ id: "1", identifier: "ISSUE-1", state: "In Progress" }]; + }, + }; + + const timers = createFakeTimerScheduler(); + const orchestrator = new 
OrchestratorCore({ + config: createConfig({ agent: { maxRetryAttempts: 2 } }), + tracker, + spawnWorker: async () => ({ + workerHandle: { pid: 1001 }, + monitorHandle: { ref: "monitor-1" }, + }), + postComment: async (issueId, body) => { + escalationComments.push({ issueId, body }); + }, + timerScheduler: timers, + now: () => new Date("2026-03-06T00:00:05.000Z"), + }); + + // Dispatch via pollTick + await orchestrator.pollTick(); + + // Normal exit -> continuation retry (attempt=1) + orchestrator.onWorkerExit({ + issueId: "1", + outcome: "normal", + endedAt: new Date("2026-03-06T00:00:05.000Z"), + }); + + // First tracker failure: continuation retry bumps to attempt=2 + const result1 = await orchestrator.onRetryTimer("1"); + expect(result1.retryEntry).toMatchObject({ + attempt: 2, + delayType: "continuation", + }); + + // Second tracker failure: continuation retry bumps to attempt=3 + // With maxRetryAttempts=2, a failure retry at attempt=3 would escalate. + // But since this is a continuation, it should NOT escalate. + const result2 = await orchestrator.onRetryTimer("1"); + expect(result2.retryEntry).not.toBeNull(); + expect(result2.retryEntry).toMatchObject({ + attempt: 3, + delayType: "continuation", + }); + + // No escalation should have occurred — the key assertion is that + // escalationComments is empty and the claim is still held. + // completed is true because onWorkerExit marks normal exits as completed + // before scheduling continuation retries (this is normal behavior). 
+ expect(escalationComments).toHaveLength(0); + expect(orchestrator.getState().claimed.has("1")).toBe(true); + }); + + it("failure retry that hits repeated tracker failures DOES escalate at maxRetryAttempts", async () => { + const escalationComments: Array<{ issueId: string; body: string }> = []; + let fetchCallCount = 0; + const tracker: IssueTracker = { + async fetchCandidateIssues() { + fetchCallCount++; + if (fetchCallCount <= 1) { + return [createIssue({ id: "1", identifier: "ISSUE-1" })]; + } + throw new Error("tracker API outage"); + }, + async fetchIssuesByStates() { + return []; + }, + async fetchIssueStatesByIds() { + return [{ id: "1", identifier: "ISSUE-1", state: "In Progress" }]; + }, + }; + + const timers = createFakeTimerScheduler(); + const orchestrator = new OrchestratorCore({ + config: createConfig({ agent: { maxRetryAttempts: 2 } }), + tracker, + spawnWorker: async () => ({ + workerHandle: { pid: 1001 }, + monitorHandle: { ref: "monitor-1" }, + }), + postComment: async (issueId, body) => { + escalationComments.push({ issueId, body }); + }, + timerScheduler: timers, + now: () => new Date("2026-03-06T00:00:05.000Z"), + }); + + // Dispatch via pollTick + await orchestrator.pollTick(); + + // Abnormal exit -> failure retry (attempt=1) + orchestrator.onWorkerExit({ + issueId: "1", + outcome: "abnormal", + reason: "turn failed", + }); + + // First tracker failure: failure retry bumps to attempt=2 (at limit) + const result1 = await orchestrator.onRetryTimer("1"); + expect(result1.retryEntry).toMatchObject({ + attempt: 2, + delayType: "failure", + }); + + // Second tracker failure: failure retry bumps to attempt=3 (exceeds limit of 2) + // This SHOULD escalate + const result2 = await orchestrator.onRetryTimer("1"); + expect(result2.retryEntry).toBeNull(); + + // Escalation should have occurred + expect(escalationComments).toHaveLength(1); + expect(escalationComments[0]?.body).toContain( + "Max retry attempts (2) exceeded", + ); + 
expect(orchestrator.getState().failed.has("1")).toBe(true); + expect(orchestrator.getState().claimed.has("1")).toBe(false); + }); +}); + +function createIssue(overrides?: Partial<Issue>): Issue { + return { + id: overrides?.id ?? "1", + identifier: overrides?.identifier ?? "ISSUE-1", + title: overrides?.title ?? "Example issue", + description: overrides?.description ?? null, + priority: overrides?.priority ?? 1, + state: overrides?.state ?? "In Progress", + branchName: overrides?.branchName ?? null, + url: overrides?.url ?? null, + labels: overrides?.labels ?? [], + blockedBy: overrides?.blockedBy ?? [], + createdAt: overrides?.createdAt ?? "2026-03-01T00:00:00.000Z", + updatedAt: overrides?.updatedAt ?? "2026-03-01T00:00:00.000Z", + }; +} + +function createConfig(overrides?: { + agent?: Partial<ResolvedWorkflowConfig["agent"]>; +}): ResolvedWorkflowConfig { + return { + workflowPath: "/tmp/WORKFLOW.md", + promptTemplate: "Prompt", + tracker: { + kind: "linear", + endpoint: "https://api.linear.app/graphql", + apiKey: "token", + projectSlug: "project", + activeStates: ["Todo", "In Progress", "In Review"], + terminalStates: ["Done", "Canceled"], + }, + polling: { + intervalMs: 30_000, + }, + workspace: { + root: "/tmp/workspaces", + }, + hooks: { + afterCreate: null, + beforeRun: null, + afterRun: null, + beforeRemove: null, + timeoutMs: 30_000, + }, + agent: { + maxConcurrentAgents: 2, + maxTurns: 5, + maxRetryBackoffMs: 300_000, + maxRetryAttempts: 5, + maxConcurrentAgentsByState: {}, + ...overrides?.agent, + }, + codex: { + command: "codex-app-server", + approvalPolicy: "never", + threadSandbox: null, + turnSandboxPolicy: null, + turnTimeoutMs: 300_000, + readTimeoutMs: 30_000, + stallTimeoutMs: 300_000, + }, + server: { + port: null, + slackNotifyChannel: null, + }, + observability: { + dashboardEnabled: true, + refreshMs: 1_000, + renderIntervalMs: 16, + }, + runner: { + kind: "codex", + model: null, + }, + stages: null, + escalationState: null, + }; +} + 
+function createFakeTimerScheduler() { + const scheduled: Array<{ + callback: () => void; + delayMs: number; + }> = []; + return { + scheduled, + set(callback: () => void, delayMs: number) { + scheduled.push({ callback, delayMs }); + return { callback, delayMs } as unknown as ReturnType<typeof setTimeout>; + }, + clear() {}, + }; +} diff --git a/tests/orchestrator/runtime-host.test.ts b/tests/orchestrator/runtime-host.test.ts index 7cbf7bd5..00d595e2 100644 --- a/tests/orchestrator/runtime-host.test.ts +++ b/tests/orchestrator/runtime-host.test.ts @@ -10,7 +10,12 @@ import { type StructuredLogEntry, StructuredLogger, } from "../../src/logging/structured-logger.js"; -import { OrchestratorRuntimeHost } from "../../src/orchestrator/runtime-host.js"; +import type { PipelineNotificationEvent } from "../../src/orchestrator/pipeline-notifier.js"; +import { + OrchestratorRuntimeHost, + extractProductName, + startRuntimeService, +} from "../../src/orchestrator/runtime-host.js"; import type { IssueStateSnapshot, IssueTracker, @@ -72,6 +77,9 @@ describe("OrchestratorRuntimeHost", () => { input_tokens: 11, output_tokens: 7, total_tokens: 18, + cache_read_tokens: 0, + cache_write_tokens: 0, + reasoning_tokens: 0, }, }), ]); @@ -103,10 +111,23 @@ describe("OrchestratorRuntimeHost", () => { codexInputTokens: 11, codexOutputTokens: 7, codexTotalTokens: 18, + codexCacheReadTokens: 0, + codexCacheWriteTokens: 0, + codexNoCacheTokens: 0, + codexReasoningTokens: 0, + codexTotalInputTokens: 11, + codexTotalOutputTokens: 7, lastReportedInputTokens: 11, lastReportedOutputTokens: 7, lastReportedTotalTokens: 18, turnCount: 1, + totalStageInputTokens: 0, + totalStageOutputTokens: 0, + totalStageTotalTokens: 0, + totalStageCacheReadTokens: 0, + totalStageCacheWriteTokens: 0, + turnHistory: [], + recentActivity: [], }, turnsCompleted: 1, lastTurn: null, @@ -278,144 +299,1660 @@ describe("OrchestratorRuntimeHost", () => { }), ); }); -}); -class FakeAgentRunner { - onEvent: ((event: 
AgentRunnerEvent) => void) | undefined; - readonly runs = new Map< - string, - { - resolve: (result: AgentRunResult) => void; - reject: (error: Error) => void; - } - >(); - readonly abortReasons: string[] = []; + it("logs turn_number, prompt_chars, and estimated_prompt_tokens for turn_completed events", async () => { + const tracker = createTracker(); + const fakeRunner = new FakeAgentRunner(); + const entries: StructuredLogEntry[] = []; + const logger = new StructuredLogger([ + { + write(entry) { + entries.push(entry); + }, + }, + ]); + const host = new OrchestratorRuntimeHost({ + config: createConfig(), + tracker, + logger, + createAgentRunner: ({ onEvent }) => { + fakeRunner.onEvent = onEvent; + return fakeRunner; + }, + now: () => new Date("2026-03-06T00:00:05.000Z"), + }); - async run(input: { - issue: Issue; - attempt: number | null; - signal?: AbortSignal; - }): Promise<AgentRunResult> { - return await new Promise<AgentRunResult>((resolve, reject) => { - this.runs.set(input.issue.id, { resolve, reject }); - input.signal?.addEventListener( - "abort", - () => { - const reason = - typeof input.signal?.reason === "string" - ? 
input.signal.reason - : "aborted"; - this.abortReasons.push(reason); - reject(new Error(reason)); + await host.pollOnce(); + fakeRunner.emit("1", { + event: "turn_completed", + timestamp: "2026-03-06T00:00:02.000Z", + codexAppServerPid: "1001", + sessionId: "thread-1-turn-1", + threadId: "thread-1", + turnId: "turn-1", + turnCount: 1, + promptChars: 1200, + estimatedPromptTokens: 300, + usage: { + inputTokens: 100, + outputTokens: 50, + totalTokens: 150, + }, + message: "turn done", + }); + await host.flushEvents(); + + const turnCompletedEntry = entries.find( + (e) => e.event === "turn_completed", + ); + expect(turnCompletedEntry).toBeDefined(); + expect(turnCompletedEntry).toMatchObject({ + event: "turn_completed", + turn_number: 1, + prompt_chars: 1200, + estimated_prompt_tokens: 300, + }); + }); + + it("emits stage_completed event on normal worker exit with token and turn fields", async () => { + const tracker = createTracker(); + const fakeRunner = new FakeAgentRunner(); + const entries: StructuredLogEntry[] = []; + const logger = new StructuredLogger([ + { + write(entry) { + entries.push(entry); }, - { once: true }, - ); + }, + ]); + const host = new OrchestratorRuntimeHost({ + config: createConfig(), + tracker, + logger, + createAgentRunner: ({ onEvent }) => { + fakeRunner.onEvent = onEvent; + return fakeRunner; + }, + now: () => new Date("2026-03-06T00:00:05.000Z"), }); - } - emit( - issueId: string, - event: Omit< - AgentRunnerEvent, - "issueId" | "issueIdentifier" | "attempt" | "workspacePath" | "turnCount" - > & - Partial<Pick<AgentRunnerEvent, "turnCount">>, - ): void { - this.onEvent?.({ - ...event, - issueId, - issueIdentifier: "ISSUE-1", - attempt: null, - workspacePath: "/tmp/workspaces/1", - turnCount: event.turnCount ?? 
0, + await host.pollOnce(); + fakeRunner.resolve("1", { + issue: createIssue({ state: "In Progress" }), + workspace: { + path: "/tmp/workspaces/1", + workspaceKey: "1", + createdNow: true, + }, + runAttempt: { + issueId: "1", + issueIdentifier: "ISSUE-1", + attempt: null, + workspacePath: "/tmp/workspaces/1", + startedAt: "2026-03-06T00:00:00.000Z", + status: "succeeded", + }, + liveSession: { + sessionId: "thread-1-turn-1", + threadId: "thread-1", + turnId: "turn-1", + codexAppServerPid: "1001", + lastCodexEvent: "turn_completed", + lastCodexTimestamp: "2026-03-06T00:00:02.000Z", + lastCodexMessage: "done", + codexInputTokens: 100, + codexOutputTokens: 50, + codexTotalTokens: 150, + codexCacheReadTokens: 10, + codexCacheWriteTokens: 5, + codexNoCacheTokens: 0, + codexReasoningTokens: 20, + codexTotalInputTokens: 280, + codexTotalOutputTokens: 140, + lastReportedInputTokens: 100, + lastReportedOutputTokens: 50, + lastReportedTotalTokens: 150, + turnCount: 3, + totalStageInputTokens: 300, + totalStageOutputTokens: 150, + totalStageTotalTokens: 450, + totalStageCacheReadTokens: 30, + totalStageCacheWriteTokens: 15, + turnHistory: [], + recentActivity: [], + }, + turnsCompleted: 3, + lastTurn: null, + rateLimits: null, }); - } + await host.waitForIdle(); - resolve(issueId: string, result: AgentRunResult): void { - const run = this.runs.get(issueId); - if (run === undefined) { - throw new Error(`No fake run registered for ${issueId}.`); - } - this.runs.delete(issueId); - run.resolve(result); - } -} + const stageCompletedEntry = entries.find( + (e) => e.event === "stage_completed", + ); + expect(stageCompletedEntry).toBeDefined(); + expect(stageCompletedEntry).toMatchObject({ + event: "stage_completed", + level: "info", + issue_id: "1", + issue_identifier: "ISSUE-1", + session_id: "thread-1-turn-1", + stage_name: null, + input_tokens: 100, + output_tokens: 50, + total_tokens: 150, + cache_read_tokens: 10, + cache_write_tokens: 5, + reasoning_tokens: 20, + turns_used: 3, 
+ total_input_tokens: 300, + total_output_tokens: 150, + total_total_tokens: 450, + total_cache_read_tokens: 30, + total_cache_write_tokens: 15, + turn_count: 3, + duration_ms: 5000, + outcome: "completed", + }); + }); -function createTracker(input?: { candidates?: Issue[] }) { - let candidates = input?.candidates ?? [createIssue()]; - let stateSnapshots: IssueStateSnapshot[] = [ - { id: "1", identifier: "ISSUE-1", state: "In Progress" }, - ]; + it("emits stage_completed event on abnormal worker exit with outcome failed", async () => { + const tracker = createTracker(); + const fakeRunner = new FakeAgentRunner(); + const entries: StructuredLogEntry[] = []; + const logger = new StructuredLogger([ + { + write(entry) { + entries.push(entry); + }, + }, + ]); + const host = new OrchestratorRuntimeHost({ + config: createConfig(), + tracker, + logger, + createAgentRunner: ({ onEvent }) => { + fakeRunner.onEvent = onEvent; + return fakeRunner; + }, + now: () => new Date("2026-03-06T00:00:05.000Z"), + }); - const tracker: IssueTracker & { - setCandidates(next: Issue[]): void; - setStateSnapshots(next: IssueStateSnapshot[]): void; - } = { - fetchCandidateIssues: vi.fn(async () => candidates), - fetchIssuesByStates: vi.fn(async () => []), - fetchIssueStatesByIds: vi.fn(async () => stateSnapshots), - setCandidates(next) { - candidates = next; - }, - setStateSnapshots(next) { - stateSnapshots = next; - }, - }; + await host.pollOnce(); + fakeRunner.reject("1", new Error("something went wrong")); + await host.waitForIdle(); - return tracker; -} + const stageCompletedEntry = entries.find( + (e) => e.event === "stage_completed", + ); + expect(stageCompletedEntry).toBeDefined(); + expect(stageCompletedEntry).toMatchObject({ + event: "stage_completed", + level: "info", + issue_id: "1", + issue_identifier: "ISSUE-1", + stage_name: null, + input_tokens: 0, + output_tokens: 0, + total_tokens: 0, + turns_used: 0, + total_input_tokens: 0, + total_output_tokens: 0, + total_total_tokens: 0, 
+ turn_count: 0, + duration_ms: 0, + outcome: "failed", + }); + }); -function createIssue(overrides?: Partial<Issue>): Issue { - return { - id: "1", - identifier: "ISSUE-1", - title: "Issue 1", - description: null, - priority: 1, - state: "In Progress", - branchName: null, - url: null, - labels: [], - blockedBy: [], - createdAt: "2026-03-01T00:00:00.000Z", - updatedAt: "2026-03-01T00:00:00.000Z", - ...overrides, - }; -} + it("emits stage_completed with correct stage_name when stages are configured", async () => { + const tracker = createTracker(); + const fakeRunner = new FakeAgentRunner(); + const entries: StructuredLogEntry[] = []; + const logger = new StructuredLogger([ + { + write(entry) { + entries.push(entry); + }, + }, + ]); + const host = new OrchestratorRuntimeHost({ + config: createStagedConfig(), + tracker, + logger, + createAgentRunner: ({ onEvent }) => { + fakeRunner.onEvent = onEvent; + return fakeRunner; + }, + now: () => new Date("2026-03-06T00:00:05.000Z"), + }); -function createConfig(): ResolvedWorkflowConfig { - return { - workflowPath: "/tmp/WORKFLOW.md", - promptTemplate: "Prompt", - tracker: { - kind: "linear", - endpoint: "https://api.linear.app/graphql", - apiKey: "token", - projectSlug: "project", - activeStates: ["Todo", "In Progress", "In Review"], - terminalStates: ["Done", "Canceled"], - }, - polling: { - intervalMs: 30_000, - }, - workspace: { - root: "/tmp/workspaces", - }, - hooks: { - afterCreate: null, - beforeRun: null, - afterRun: null, - beforeRemove: null, - timeoutMs: 30_000, - }, - agent: { - maxConcurrentAgents: 2, - maxTurns: 5, - maxRetryBackoffMs: 300_000, - maxConcurrentAgentsByState: {}, - }, - codex: { - command: "codex-app-server", + await host.pollOnce(); + fakeRunner.resolve("1", { + issue: createIssue({ state: "In Progress" }), + workspace: { + path: "/tmp/workspaces/1", + workspaceKey: "1", + createdNow: true, + }, + runAttempt: { + issueId: "1", + issueIdentifier: "ISSUE-1", + attempt: null, + workspacePath: 
"/tmp/workspaces/1", + startedAt: "2026-03-06T00:00:00.000Z", + status: "succeeded", + }, + liveSession: { + sessionId: "thread-1-turn-1", + threadId: "thread-1", + turnId: "turn-1", + codexAppServerPid: "1001", + lastCodexEvent: "turn_completed", + lastCodexTimestamp: "2026-03-06T00:00:02.000Z", + lastCodexMessage: "done", + codexInputTokens: 30, + codexOutputTokens: 20, + codexTotalTokens: 50, + codexCacheReadTokens: 0, + codexCacheWriteTokens: 0, + codexNoCacheTokens: 0, + codexReasoningTokens: 0, + codexTotalInputTokens: 60, + codexTotalOutputTokens: 40, + lastReportedInputTokens: 30, + lastReportedOutputTokens: 20, + lastReportedTotalTokens: 50, + turnCount: 2, + totalStageInputTokens: 0, + totalStageOutputTokens: 0, + totalStageTotalTokens: 0, + totalStageCacheReadTokens: 0, + totalStageCacheWriteTokens: 0, + turnHistory: [], + recentActivity: [], + }, + turnsCompleted: 2, + lastTurn: null, + rateLimits: null, + }); + await host.waitForIdle(); + + const stageCompletedEntry = entries.find( + (e) => e.event === "stage_completed", + ); + expect(stageCompletedEntry).toBeDefined(); + expect(stageCompletedEntry).toMatchObject({ + event: "stage_completed", + stage_name: "investigate", + turns_used: 2, + turn_count: 2, + }); + }); + + it("includes no_cache_tokens in stage_completed when codexNoCacheTokens is non-zero", async () => { + const tracker = createTracker(); + const fakeRunner = new FakeAgentRunner(); + const entries: StructuredLogEntry[] = []; + const logger = new StructuredLogger([ + { + write(entry) { + entries.push(entry); + }, + }, + ]); + const host = new OrchestratorRuntimeHost({ + config: createConfig(), + tracker, + logger, + createAgentRunner: ({ onEvent }) => { + fakeRunner.onEvent = onEvent; + return fakeRunner; + }, + now: () => new Date("2026-03-06T00:00:05.000Z"), + }); + + await host.pollOnce(); + fakeRunner.resolve("1", { + issue: createIssue({ state: "In Progress" }), + workspace: { + path: "/tmp/workspaces/1", + workspaceKey: "1", + 
createdNow: true, + }, + runAttempt: { + issueId: "1", + issueIdentifier: "ISSUE-1", + attempt: null, + workspacePath: "/tmp/workspaces/1", + startedAt: "2026-03-06T00:00:00.000Z", + status: "succeeded", + }, + liveSession: { + sessionId: "thread-1-turn-1", + threadId: "thread-1", + turnId: "turn-1", + codexAppServerPid: "1001", + lastCodexEvent: "turn_completed", + lastCodexTimestamp: "2026-03-06T00:00:02.000Z", + lastCodexMessage: "done", + codexInputTokens: 100, + codexOutputTokens: 50, + codexTotalTokens: 150, + codexCacheReadTokens: 0, + codexCacheWriteTokens: 0, + codexNoCacheTokens: 42, + codexReasoningTokens: 0, + codexTotalInputTokens: 100, + codexTotalOutputTokens: 50, + lastReportedInputTokens: 100, + lastReportedOutputTokens: 50, + lastReportedTotalTokens: 150, + turnCount: 1, + totalStageInputTokens: 0, + totalStageOutputTokens: 0, + totalStageTotalTokens: 0, + totalStageCacheReadTokens: 0, + totalStageCacheWriteTokens: 0, + turnHistory: [], + recentActivity: [], + }, + turnsCompleted: 1, + lastTurn: null, + rateLimits: null, + }); + await host.waitForIdle(); + + const stageCompletedEntry = entries.find( + (e) => e.event === "stage_completed", + ); + expect(stageCompletedEntry).toBeDefined(); + expect(stageCompletedEntry).toMatchObject({ + event: "stage_completed", + no_cache_tokens: 42, + }); + }); + + it("omits no_cache_tokens from stage_completed when codexNoCacheTokens is zero", async () => { + const tracker = createTracker(); + const fakeRunner = new FakeAgentRunner(); + const entries: StructuredLogEntry[] = []; + const logger = new StructuredLogger([ + { + write(entry) { + entries.push(entry); + }, + }, + ]); + const host = new OrchestratorRuntimeHost({ + config: createConfig(), + tracker, + logger, + createAgentRunner: ({ onEvent }) => { + fakeRunner.onEvent = onEvent; + return fakeRunner; + }, + now: () => new Date("2026-03-06T00:00:05.000Z"), + }); + + await host.pollOnce(); + fakeRunner.resolve("1", { + issue: createIssue({ state: "In 
Progress" }), + workspace: { + path: "/tmp/workspaces/1", + workspaceKey: "1", + createdNow: true, + }, + runAttempt: { + issueId: "1", + issueIdentifier: "ISSUE-1", + attempt: null, + workspacePath: "/tmp/workspaces/1", + startedAt: "2026-03-06T00:00:00.000Z", + status: "succeeded", + }, + liveSession: { + sessionId: "thread-1-turn-1", + threadId: "thread-1", + turnId: "turn-1", + codexAppServerPid: "1001", + lastCodexEvent: "turn_completed", + lastCodexTimestamp: "2026-03-06T00:00:02.000Z", + lastCodexMessage: "done", + codexInputTokens: 100, + codexOutputTokens: 50, + codexTotalTokens: 150, + codexCacheReadTokens: 0, + codexCacheWriteTokens: 0, + codexNoCacheTokens: 0, + codexReasoningTokens: 0, + codexTotalInputTokens: 100, + codexTotalOutputTokens: 50, + lastReportedInputTokens: 100, + lastReportedOutputTokens: 50, + lastReportedTotalTokens: 150, + turnCount: 1, + totalStageInputTokens: 0, + totalStageOutputTokens: 0, + totalStageTotalTokens: 0, + totalStageCacheReadTokens: 0, + totalStageCacheWriteTokens: 0, + turnHistory: [], + recentActivity: [], + }, + turnsCompleted: 1, + lastTurn: null, + rateLimits: null, + }); + await host.waitForIdle(); + + const stageCompletedEntry = entries.find( + (e) => e.event === "stage_completed", + ); + expect(stageCompletedEntry).toBeDefined(); + expect(stageCompletedEntry).not.toHaveProperty("no_cache_tokens"); + }); + + it("aggregates total_input_tokens and total_output_tokens across multiple turns in stage_completed", async () => { + const tracker = createTracker(); + const fakeRunner = new FakeAgentRunner(); + const entries: StructuredLogEntry[] = []; + const logger = new StructuredLogger([ + { + write(entry) { + entries.push(entry); + }, + }, + ]); + const host = new OrchestratorRuntimeHost({ + config: createConfig(), + tracker, + logger, + createAgentRunner: ({ onEvent }) => { + fakeRunner.onEvent = onEvent; + return fakeRunner; + }, + now: () => new Date("2026-03-06T00:00:05.000Z"), + }); + + await host.pollOnce(); + + 
// Turn 1: 100 input, 40 output + fakeRunner.emit("1", { + event: "session_started", + timestamp: "2026-03-06T00:00:01.000Z", + codexAppServerPid: "1001", + sessionId: "thread-1-turn-1", + threadId: "thread-1", + turnId: "turn-1", + }); + fakeRunner.emit("1", { + event: "turn_completed", + timestamp: "2026-03-06T00:00:02.000Z", + codexAppServerPid: "1001", + sessionId: "thread-1-turn-1", + threadId: "thread-1", + turnId: "turn-1", + usage: { + inputTokens: 100, + outputTokens: 40, + totalTokens: 140, + cacheReadTokens: 5, + cacheWriteTokens: 3, + }, + message: "turn 1 done", + }); + + // Turn 2: 120 input, 60 output (absolute counters reset per turn) + fakeRunner.emit("1", { + event: "session_started", + timestamp: "2026-03-06T00:00:03.000Z", + codexAppServerPid: "1001", + sessionId: "thread-1-turn-2", + threadId: "thread-1", + turnId: "turn-2", + }); + fakeRunner.emit("1", { + event: "turn_completed", + timestamp: "2026-03-06T00:00:04.000Z", + codexAppServerPid: "1001", + sessionId: "thread-1-turn-2", + threadId: "thread-1", + turnId: "turn-2", + usage: { + inputTokens: 120, + outputTokens: 60, + totalTokens: 180, + cacheReadTokens: 8, + cacheWriteTokens: 4, + }, + message: "turn 2 done", + }); + await host.flushEvents(); + + fakeRunner.resolve("1", { + issue: createIssue({ state: "In Progress" }), + workspace: { + path: "/tmp/workspaces/1", + workspaceKey: "1", + createdNow: true, + }, + runAttempt: { + issueId: "1", + issueIdentifier: "ISSUE-1", + attempt: null, + workspacePath: "/tmp/workspaces/1", + startedAt: "2026-03-06T00:00:00.000Z", + status: "succeeded", + }, + liveSession: { + sessionId: "thread-1-turn-2", + threadId: "thread-1", + turnId: "turn-2", + codexAppServerPid: "1001", + lastCodexEvent: "turn_completed", + lastCodexTimestamp: "2026-03-06T00:00:04.000Z", + lastCodexMessage: "turn 2 done", + codexInputTokens: 120, + codexOutputTokens: 60, + codexTotalTokens: 180, + codexCacheReadTokens: 13, + codexCacheWriteTokens: 7, + codexNoCacheTokens: 0, + 
codexReasoningTokens: 0, + codexTotalInputTokens: 220, + codexTotalOutputTokens: 100, + lastReportedInputTokens: 120, + lastReportedOutputTokens: 60, + lastReportedTotalTokens: 180, + turnCount: 4, + totalStageInputTokens: 220, + totalStageOutputTokens: 100, + totalStageTotalTokens: 320, + totalStageCacheReadTokens: 13, + totalStageCacheWriteTokens: 7, + turnHistory: [], + recentActivity: [], + }, + turnsCompleted: 4, + lastTurn: null, + rateLimits: null, + }); + await host.waitForIdle(); + + const stageCompletedEntry = entries.find( + (e) => e.event === "stage_completed", + ); + expect(stageCompletedEntry).toBeDefined(); + expect(stageCompletedEntry).toMatchObject({ + event: "stage_completed", + total_input_tokens: 220, + total_output_tokens: 100, + total_total_tokens: 320, + total_cache_read_tokens: 13, + total_cache_write_tokens: 7, + turn_count: 4, + }); + }); +}); + +describe("startRuntimeService shutdown", () => { + it("aborts running workers before waiting for idle on shutdown", async () => { + const tracker = createTracker(); + const fakeRunner = new FakeAgentRunner(); + const entries: StructuredLogEntry[] = []; + const logger = new StructuredLogger([ + { + write(entry) { + entries.push(entry); + }, + }, + ]); + + const service = await startRuntimeService({ + config: createConfig(), + tracker, + logger, + workflowWatcher: null, + runtimeHost: new OrchestratorRuntimeHost({ + config: createConfig(), + tracker, + logger, + createAgentRunner: ({ onEvent }) => { + fakeRunner.onEvent = onEvent; + return fakeRunner; + }, + now: () => new Date("2026-03-06T00:00:05.000Z"), + }), + }); + + // Wait for the initial poll to dispatch the worker + await service.runtimeHost.flushEvents(); + + // Call shutdown — should abort all workers + await service.shutdown(); + + expect(fakeRunner.abortReasons).toContain( + "Shutdown: aborting running workers.", + ); + }); + + it("proceeds with exit after shutdown timeout if waitForIdle hangs", async () => { + const tracker = 
createTracker(); + const entries: StructuredLogEntry[] = []; + const logger = new StructuredLogger([ + { + write(entry) { + entries.push(entry); + }, + }, + ]); + + // A runner that never settles — ignores abort signals + const hangingRunner = { + run(_input: Parameters<FakeAgentRunner["run"]>[0]): Promise<never> { + return new Promise(() => { + /* never resolves */ + }); + }, + }; + + const service = await startRuntimeService({ + config: createConfig(), + tracker, + logger, + workflowWatcher: null, + shutdownTimeoutMs: 50, + runtimeHost: new OrchestratorRuntimeHost({ + config: createConfig(), + tracker, + logger, + agentRunner: hangingRunner, + now: () => new Date("2026-03-06T00:00:05.000Z"), + }), + }); + + // Wait for the initial poll to dispatch the worker + await service.runtimeHost.flushEvents(); + + // Shutdown should complete within a reasonable time despite the hanging runner + const shutdownStart = Date.now(); + await service.shutdown(); + const elapsed = Date.now() - shutdownStart; + + // Should have completed well within a second (timeout is 50ms) + expect(elapsed).toBeLessThan(5_000); + + const timeoutEntry = entries.find( + (e) => e.event === "shutdown_idle_timeout", + ); + expect(timeoutEntry).toBeDefined(); + }); + + it("logs shutdown_complete event with correct fields after clean shutdown", async () => { + const tracker = createTracker(); + const fakeRunner = new FakeAgentRunner(); + const entries: StructuredLogEntry[] = []; + const logger = new StructuredLogger([ + { + write(entry) { + entries.push(entry); + }, + }, + ]); + + const service = await startRuntimeService({ + config: createConfig(), + tracker, + logger, + workflowWatcher: null, + runtimeHost: new OrchestratorRuntimeHost({ + config: createConfig(), + tracker, + logger, + createAgentRunner: ({ onEvent }) => { + fakeRunner.onEvent = onEvent; + return fakeRunner; + }, + now: () => new Date("2026-03-06T00:00:05.000Z"), + }), + }); + + // Wait for initial poll to dispatch worker + await 
service.runtimeHost.flushEvents(); + + // Call shutdown + await service.shutdown(); + + const completeEntry = entries.find((e) => e.event === "shutdown_complete"); + expect(completeEntry).toBeDefined(); + expect(completeEntry).toHaveProperty("workers_aborted"); + expect(typeof completeEntry?.workers_aborted).toBe("number"); + expect(completeEntry).toHaveProperty("timed_out", false); + expect(completeEntry).toHaveProperty("duration_ms"); + expect(typeof completeEntry?.duration_ms).toBe("number"); + }); + + it("logs shutdown_complete with timed_out=true when shutdown timeout fires", async () => { + const tracker = createTracker(); + const entries: StructuredLogEntry[] = []; + const logger = new StructuredLogger([ + { + write(entry) { + entries.push(entry); + }, + }, + ]); + + // A runner that never settles — ignores abort signals + const hangingRunner = { + run(_input: Parameters<FakeAgentRunner["run"]>[0]): Promise<never> { + return new Promise(() => { + /* never resolves */ + }); + }, + }; + + const service = await startRuntimeService({ + config: createConfig(), + tracker, + logger, + workflowWatcher: null, + shutdownTimeoutMs: 50, + runtimeHost: new OrchestratorRuntimeHost({ + config: createConfig(), + tracker, + logger, + agentRunner: hangingRunner, + now: () => new Date("2026-03-06T00:00:05.000Z"), + }), + }); + + // Wait for initial poll to dispatch worker + await service.runtimeHost.flushEvents(); + + // Shutdown should complete after timeout + await service.shutdown(); + + const completeEntry = entries.find((e) => e.event === "shutdown_complete"); + expect(completeEntry).toBeDefined(); + expect(completeEntry).toHaveProperty("timed_out", true); + expect(completeEntry).toHaveProperty("workers_aborted"); + expect(typeof completeEntry?.duration_ms).toBe("number"); + }); +}); + +describe("startRuntimeService poll_tick_completed", () => { + it("logs poll_tick_completed event after a successful poll", async () => { + const tracker = createTracker({ candidates: [] 
}); + const entries: StructuredLogEntry[] = []; + const logger = new StructuredLogger([ + { + write(entry) { + entries.push(entry); + }, + }, + ]); + + const service = await startRuntimeService({ + config: createConfig(), + tracker, + logger, + workflowWatcher: null, + runtimeHost: new OrchestratorRuntimeHost({ + config: createConfig(), + tracker, + logger, + agentRunner: new FakeAgentRunner(), + now: () => new Date("2026-03-06T00:00:05.000Z"), + }), + }); + + await service.runtimeHost.flushEvents(); + await service.shutdown(); + + const tickEntry = entries.find((e) => e.event === "poll_tick_completed"); + expect(tickEntry).toBeDefined(); + expect(tickEntry).toHaveProperty("dispatched_count"); + expect(tickEntry).toHaveProperty("running_count"); + expect(tickEntry).toHaveProperty("reconciled_stop_requests"); + expect(typeof tickEntry?.duration_ms).toBe("number"); + }); + + it("logs poll_tick_completed with dispatched_count reflecting newly dispatched issues", async () => { + const tracker = createTracker(); + const entries: StructuredLogEntry[] = []; + const logger = new StructuredLogger([ + { + write(entry) { + entries.push(entry); + }, + }, + ]); + + const service = await startRuntimeService({ + config: createConfig(), + tracker, + logger, + workflowWatcher: null, + runtimeHost: new OrchestratorRuntimeHost({ + config: createConfig(), + tracker, + logger, + agentRunner: new FakeAgentRunner(), + now: () => new Date("2026-03-06T00:00:05.000Z"), + }), + }); + + await service.runtimeHost.flushEvents(); + await service.shutdown(); + + const tickEntry = entries.find((e) => e.event === "poll_tick_completed"); + expect(tickEntry).toBeDefined(); + // One issue was dispatched in the initial poll tick + expect(tickEntry).toHaveProperty("dispatched_count", 1); + }); +}); + +describe("pipeline notifications", () => { + function createMockNotifier() { + const events: PipelineNotificationEvent[] = []; + return { + events, + notify(event: PipelineNotificationEvent) { + 
events.push(event); + }, + }; + } + + it("fires issue_completed on terminal completion", async () => { + const tracker = createTracker(); + const fakeRunner = new FakeAgentRunner(); + const notifier = createMockNotifier(); + const host = new OrchestratorRuntimeHost({ + config: createStagedConfig({ + stages: { + initialStage: "implement", + fastTrack: null, + stages: { + implement: { + type: "agent", + runner: null, + model: null, + prompt: null, + maxTurns: null, + timeoutMs: null, + concurrency: null, + gateType: null, + maxRework: null, + reviewers: [], + transitions: { + onComplete: "done", + onApprove: null, + onRework: null, + }, + linearState: null, + }, + done: { + type: "terminal", + runner: null, + model: null, + prompt: null, + maxTurns: null, + timeoutMs: null, + concurrency: null, + gateType: null, + maxRework: null, + reviewers: [], + transitions: { + onComplete: null, + onApprove: null, + onRework: null, + }, + linearState: null, + }, + }, + }, + }), + tracker, + notifier, + createAgentRunner: ({ onEvent }) => { + fakeRunner.onEvent = onEvent; + return fakeRunner; + }, + now: () => new Date("2026-03-06T00:00:05.000Z"), + }); + + await host.pollOnce(); + fakeRunner.resolve("1", createNormalResult()); + await host.waitForIdle(); + + expect(notifier.events).toHaveLength(1); + expect(notifier.events[0]).toMatchObject({ + type: "issue_completed", + issueIdentifier: "ISSUE-1", + issueTitle: "Issue 1", + }); + }); + + it("includes final stage record in executionHistory for single-stage terminal completion", async () => { + const tracker = createTracker(); + const fakeRunner = new FakeAgentRunner(); + const notifier = createMockNotifier(); + const host = new OrchestratorRuntimeHost({ + config: createStagedConfig({ + stages: { + initialStage: "implement", + fastTrack: null, + stages: { + implement: { + type: "agent", + runner: null, + model: null, + prompt: null, + maxTurns: null, + timeoutMs: null, + concurrency: null, + gateType: null, + maxRework: null, + 
reviewers: [], + transitions: { + onComplete: "done", + onApprove: null, + onRework: null, + }, + linearState: null, + }, + done: { + type: "terminal", + runner: null, + model: null, + prompt: null, + maxTurns: null, + timeoutMs: null, + concurrency: null, + gateType: null, + maxRework: null, + reviewers: [], + transitions: { + onComplete: null, + onApprove: null, + onRework: null, + }, + linearState: null, + }, + }, + }, + }), + tracker, + notifier, + createAgentRunner: ({ onEvent }) => { + fakeRunner.onEvent = onEvent; + return fakeRunner; + }, + now: () => new Date("2026-03-06T00:00:05.000Z"), + }); + + await host.pollOnce(); + fakeRunner.resolve("1", createNormalResult()); + await host.waitForIdle(); + + expect(notifier.events).toHaveLength(1); + const event = notifier.events[0]!; + expect(event.type).toBe("issue_completed"); + // The history must contain the final stage record — not be empty + const completed = event as Extract< + PipelineNotificationEvent, + { type: "issue_completed" } + >; + expect(completed.executionHistory).toHaveLength(1); + expect(completed.executionHistory[0]).toMatchObject({ + stageName: "implement", + outcome: "normal", + }); + }); + + it("includes all stage records in executionHistory for multi-stage terminal completion", async () => { + const tracker = createTracker(); + const fakeRunner = new FakeAgentRunner(); + const notifier = createMockNotifier(); + const host = new OrchestratorRuntimeHost({ + config: createStagedConfig({ + stages: { + initialStage: "investigate", + fastTrack: null, + stages: { + investigate: { + type: "agent", + runner: null, + model: null, + prompt: null, + maxTurns: null, + timeoutMs: null, + concurrency: null, + gateType: null, + maxRework: null, + reviewers: [], + transitions: { + onComplete: "implement", + onApprove: null, + onRework: null, + }, + linearState: null, + }, + implement: { + type: "agent", + runner: null, + model: null, + prompt: null, + maxTurns: null, + timeoutMs: null, + concurrency: null, 
+ gateType: null, + maxRework: null, + reviewers: [], + transitions: { + onComplete: "done", + onApprove: null, + onRework: null, + }, + linearState: null, + }, + done: { + type: "terminal", + runner: null, + model: null, + prompt: null, + maxTurns: null, + timeoutMs: null, + concurrency: null, + gateType: null, + maxRework: null, + reviewers: [], + transitions: { + onComplete: null, + onApprove: null, + onRework: null, + }, + linearState: null, + }, + }, + }, + }), + tracker, + notifier, + createAgentRunner: ({ onEvent }) => { + fakeRunner.onEvent = onEvent; + return fakeRunner; + }, + now: () => new Date("2026-03-06T00:00:05.000Z"), + }); + + // Stage 1: investigate → completes, advances to implement (continuation) + await host.pollOnce(); + fakeRunner.resolve("1", createNormalResult()); + await host.waitForIdle(); + + // No completion notification yet — stage continuation + expect(notifier.events).toHaveLength(0); + + // Stage 2: fire the continuation retry timer to dispatch "implement" + const retryResult = await host.runRetryTimer("1"); + // The retry timer should have dispatched the worker + expect(retryResult.dispatched).toBe(true); + fakeRunner.resolve("1", createNormalResult()); + await host.waitForIdle(); + + // Now we should see the terminal completion + expect(notifier.events).toHaveLength(1); + const event = notifier.events[0]!; + expect(event.type).toBe("issue_completed"); + // History must include records from BOTH stages + const completed = event as Extract< + PipelineNotificationEvent, + { type: "issue_completed" } + >; + expect(completed.executionHistory).toHaveLength(2); + expect(completed.executionHistory[0]).toMatchObject({ + stageName: "investigate", + }); + expect(completed.executionHistory[1]).toMatchObject({ + stageName: "implement", + outcome: "normal", + }); + }); + + it("does NOT fire issue_completed on stage continuation", async () => { + const tracker = createTracker(); + const fakeRunner = new FakeAgentRunner(); + const notifier = 
createMockNotifier(); + const host = new OrchestratorRuntimeHost({ + config: createStagedConfig({ + stages: { + initialStage: "investigate", + fastTrack: null, + stages: { + investigate: { + type: "agent", + runner: null, + model: null, + prompt: null, + maxTurns: null, + timeoutMs: null, + concurrency: null, + gateType: null, + maxRework: null, + reviewers: [], + transitions: { + onComplete: "implement", + onApprove: null, + onRework: null, + }, + linearState: null, + }, + implement: { + type: "agent", + runner: null, + model: null, + prompt: null, + maxTurns: null, + timeoutMs: null, + concurrency: null, + gateType: null, + maxRework: null, + reviewers: [], + transitions: { + onComplete: "done", + onApprove: null, + onRework: null, + }, + linearState: null, + }, + done: { + type: "terminal", + runner: null, + model: null, + prompt: null, + maxTurns: null, + timeoutMs: null, + concurrency: null, + gateType: null, + maxRework: null, + reviewers: [], + transitions: { + onComplete: null, + onApprove: null, + onRework: null, + }, + linearState: null, + }, + }, + }, + }), + tracker, + notifier, + createAgentRunner: ({ onEvent }) => { + fakeRunner.onEvent = onEvent; + return fakeRunner; + }, + now: () => new Date("2026-03-06T00:00:05.000Z"), + }); + + await host.pollOnce(); + fakeRunner.resolve("1", createNormalResult()); + await host.waitForIdle(); + + // Stage advanced from investigate → implement, scheduled continuation retry. + // No notification should fire. 
+ expect(notifier.events).toHaveLength(0); + }); + + it("fires issue_failed when retries are exhausted", async () => { + const tracker = createTracker(); + const fakeRunner = new FakeAgentRunner(); + const notifier = createMockNotifier(); + const host = new OrchestratorRuntimeHost({ + config: { + ...createConfig(), + agent: { ...createConfig().agent, maxRetryAttempts: 0 }, + }, + tracker, + notifier, + createAgentRunner: ({ onEvent }) => { + fakeRunner.onEvent = onEvent; + return fakeRunner; + }, + now: () => new Date("2026-03-06T00:00:05.000Z"), + }); + + await host.pollOnce(); + fakeRunner.reject("1", new Error("agent crashed")); + await host.waitForIdle(); + + expect(notifier.events).toHaveLength(1); + expect(notifier.events[0]).toMatchObject({ + type: "issue_failed", + issueIdentifier: "ISSUE-1", + retriesExhausted: true, + }); + }); + + it("fires stall_killed when a stall timeout aborts a worker", async () => { + const tracker = createTracker(); + const fakeRunner = new FakeAgentRunner(); + const notifier = createMockNotifier(); + const host = new OrchestratorRuntimeHost({ + config: createConfig(), + tracker, + notifier, + createAgentRunner: ({ onEvent }) => { + fakeRunner.onEvent = onEvent; + return fakeRunner; + }, + now: () => new Date("2026-03-06T00:00:05.000Z"), + }); + + await host.pollOnce(); + + // Simulate the stall timeout triggering reconcileStalledRuns → stopRunningIssue + tracker.setStateSnapshots([ + { id: "1", identifier: "ISSUE-1", state: "In Progress" }, + ]); + const reconcileTick = await host.pollOnce(); + + // Manually mark this as a stall_timeout stop request to simulate the flow + // biome-ignore lint/suspicious/noExplicitAny: accessing private field for test setup + const worker = (host as any).workers.get("1"); + if (worker) { + worker.stopRequest = { + issueId: "1", + issueIdentifier: "ISSUE-1", + cleanupWorkspace: false, + reason: "stall_timeout", + }; + worker.controller.abort("Stopped due to stall_timeout."); + } + await 
host.waitForIdle(); + + const stallEvents = notifier.events.filter( + (e) => e.type === "stall_killed", + ); + expect(stallEvents).toHaveLength(1); + expect(stallEvents[0]).toMatchObject({ + type: "stall_killed", + issueIdentifier: "ISSUE-1", + }); + }); + + it("fires infra_error when worker exits abnormally with 0 turns", async () => { + const tracker = createTracker(); + const fakeRunner = new FakeAgentRunner(); + const notifier = createMockNotifier(); + const host = new OrchestratorRuntimeHost({ + config: createConfig(), + tracker, + notifier, + createAgentRunner: ({ onEvent }) => { + fakeRunner.onEvent = onEvent; + return fakeRunner; + }, + now: () => new Date("2026-03-06T00:00:05.000Z"), + }); + + await host.pollOnce(); + // Reject immediately — no turns completed, no session events + fakeRunner.reject("1", new Error("Failed to start agent process")); + await host.waitForIdle(); + + const infraEvents = notifier.events.filter((e) => e.type === "infra_error"); + expect(infraEvents).toHaveLength(1); + expect(infraEvents[0]).toMatchObject({ + type: "infra_error", + issueIdentifier: "ISSUE-1", + errorReason: "Failed to start agent process", + }); + }); + + it("does not fire infra_error when worker exits abnormally with turns completed", async () => { + const tracker = createTracker(); + const fakeRunner = new FakeAgentRunner(); + const notifier = createMockNotifier(); + const host = new OrchestratorRuntimeHost({ + config: createConfig(), + tracker, + notifier, + createAgentRunner: ({ onEvent }) => { + fakeRunner.onEvent = onEvent; + return fakeRunner; + }, + now: () => new Date("2026-03-06T00:00:05.000Z"), + }); + + await host.pollOnce(); + // Emit session_started (increments turnCount to 1) then turn_completed + fakeRunner.emit("1", { + event: "session_started", + timestamp: "2026-03-06T00:00:01.000Z", + codexAppServerPid: "1001", + sessionId: "thread-1-turn-1", + threadId: "thread-1", + turnId: "turn-1", + }); + fakeRunner.emit("1", { + event: "turn_completed", + 
timestamp: "2026-03-06T00:00:02.000Z", + codexAppServerPid: "1001", + sessionId: "thread-1-turn-1", + threadId: "thread-1", + turnId: "turn-1", + usage: { inputTokens: 10, outputTokens: 5, totalTokens: 15 }, + message: "turn done", + }); + await host.flushEvents(); + + fakeRunner.reject("1", new Error("agent crashed mid-run")); + await host.waitForIdle(); + + const infraEvents = notifier.events.filter((e) => e.type === "infra_error"); + expect(infraEvents).toHaveLength(0); + }); + + it("fires no notification when notifier is null", async () => { + const tracker = createTracker(); + const fakeRunner = new FakeAgentRunner(); + // No notifier passed — should not throw + const host = new OrchestratorRuntimeHost({ + config: createConfig(), + tracker, + createAgentRunner: ({ onEvent }) => { + fakeRunner.onEvent = onEvent; + return fakeRunner; + }, + now: () => new Date("2026-03-06T00:00:05.000Z"), + }); + + await host.pollOnce(); + fakeRunner.reject("1", new Error("crash")); + await host.waitForIdle(); + + // If we got here without throwing, the test passes + expect(host.notifier).toBeNull(); + }); +}); + +describe("pipeline notifications in startRuntimeService", () => { + it("fires pipeline_started and pipeline_stopped notifications", async () => { + const tracker = createTracker({ candidates: [] }); + const fakeRunner = new FakeAgentRunner(); + const notifier = createMockNotifierForService(); + const logger = new StructuredLogger([{ write() {} }]); + + const service = await startRuntimeService({ + config: createConfig(), + tracker, + logger, + notifier, + workflowWatcher: null, + runtimeHost: new OrchestratorRuntimeHost({ + config: createConfig(), + tracker, + logger, + notifier, + agentRunner: fakeRunner, + now: () => new Date("2026-03-06T00:00:05.000Z"), + }), + }); + + await service.runtimeHost.flushEvents(); + + const startedEvents = notifier.events.filter( + (e) => e.type === "pipeline_started", + ); + expect(startedEvents).toHaveLength(1); + 
expect(startedEvents[0]).toMatchObject({ + type: "pipeline_started", + }); + + await service.shutdown(); + + const stoppedEvents = notifier.events.filter( + (e) => e.type === "pipeline_stopped", + ); + expect(stoppedEvents).toHaveLength(1); + expect(stoppedEvents[0]).toMatchObject({ + type: "pipeline_stopped", + completedCount: 0, + failedCount: 0, + }); + }); + + it("pipeline_stopped.completedCount counts only terminal completions", async () => { + const tracker = createTracker({ candidates: [] }); + const fakeRunner = new FakeAgentRunner(); + const notifier = createMockNotifierForService(); + const logger = new StructuredLogger([{ write() {} }]); + + const runtimeHost = new OrchestratorRuntimeHost({ + config: createConfig(), + tracker, + logger, + notifier, + agentRunner: fakeRunner, + now: () => new Date("2026-03-06T00:00:05.000Z"), + }); + + const service = await startRuntimeService({ + config: createConfig(), + tracker, + logger, + notifier, + workflowWatcher: null, + runtimeHost, + }); + + await service.runtimeHost.flushEvents(); + + // Manipulate state: issue "A" terminally completed, issue "B" mid-continuation + const state = runtimeHost.getState(); + state.completed.add("A"); + // "B" is mid-continuation — it has a retryAttempts entry but is NOT in completed + // (after the fix, continuations no longer add to completed) + state.retryAttempts.B = { + issueId: "B", + identifier: "ISSUE-B", + attempt: 1, + dueAtMs: Date.parse("2026-03-06T00:01:00.000Z"), + timerHandle: null, + error: null, + delayType: "continuation", + }; + + await service.shutdown(); + + const stoppedEvents = notifier.events.filter( + (e) => e.type === "pipeline_stopped", + ); + expect(stoppedEvents).toHaveLength(1); + expect(stoppedEvents[0]).toMatchObject({ + type: "pipeline_stopped", + completedCount: 1, + failedCount: 0, + }); + }); + + function createMockNotifierForService() { + const events: PipelineNotificationEvent[] = []; + return { + events, + notify(event: 
PipelineNotificationEvent) { + events.push(event); + }, + }; + } +}); + +describe("extractProductName", () => { + it("extracts product name from WORKFLOW-<product>.md pattern", () => { + expect(extractProductName("/path/to/WORKFLOW-symphony.md")).toBe( + "symphony", + ); + }); + + it("returns base name for plain WORKFLOW.md", () => { + expect(extractProductName("/path/to/WORKFLOW.md")).toBe("WORKFLOW"); + }); + + it("handles paths without directory separators", () => { + expect(extractProductName("WORKFLOW-jony.md")).toBe("jony"); + }); +}); + +class FakeAgentRunner { + onEvent: ((event: AgentRunnerEvent) => void) | undefined; + readonly runs = new Map< + string, + { + resolve: (result: AgentRunResult) => void; + reject: (error: Error) => void; + } + >(); + readonly abortReasons: string[] = []; + + async run(input: { + issue: Issue; + attempt: number | null; + signal?: AbortSignal; + }): Promise<AgentRunResult> { + return await new Promise<AgentRunResult>((resolve, reject) => { + this.runs.set(input.issue.id, { resolve, reject }); + input.signal?.addEventListener( + "abort", + () => { + const reason = + typeof input.signal?.reason === "string" + ? input.signal.reason + : "aborted"; + this.abortReasons.push(reason); + reject(new Error(reason)); + }, + { once: true }, + ); + }); + } + + emit( + issueId: string, + event: Omit< + AgentRunnerEvent, + "issueId" | "issueIdentifier" | "attempt" | "workspacePath" | "turnCount" + > & + Partial<Pick<AgentRunnerEvent, "turnCount">>, + ): void { + this.onEvent?.({ + ...event, + issueId, + issueIdentifier: "ISSUE-1", + attempt: null, + workspacePath: "/tmp/workspaces/1", + turnCount: event.turnCount ?? 
0, + }); + } + + resolve(issueId: string, result: AgentRunResult): void { + const run = this.runs.get(issueId); + if (run === undefined) { + throw new Error(`No fake run registered for ${issueId}.`); + } + this.runs.delete(issueId); + run.resolve(result); + } + + reject(issueId: string, error: Error): void { + const run = this.runs.get(issueId); + if (run === undefined) { + throw new Error(`No fake run registered for ${issueId}.`); + } + this.runs.delete(issueId); + run.reject(error); + } +} + +function createTracker(input?: { candidates?: Issue[] }) { + let candidates = input?.candidates ?? [createIssue()]; + let stateSnapshots: IssueStateSnapshot[] = [ + { id: "1", identifier: "ISSUE-1", state: "In Progress" }, + ]; + + const tracker: IssueTracker & { + setCandidates(next: Issue[]): void; + setStateSnapshots(next: IssueStateSnapshot[]): void; + } = { + fetchCandidateIssues: vi.fn(async () => candidates), + fetchIssuesByStates: vi.fn(async () => []), + fetchIssueStatesByIds: vi.fn(async () => stateSnapshots), + setCandidates(next) { + candidates = next; + }, + setStateSnapshots(next) { + stateSnapshots = next; + }, + }; + + return tracker; +} + +function createIssue(overrides?: Partial<Issue>): Issue { + return { + id: "1", + identifier: "ISSUE-1", + title: "Issue 1", + description: null, + priority: 1, + state: "In Progress", + branchName: null, + url: null, + labels: [], + blockedBy: [], + createdAt: "2026-03-01T00:00:00.000Z", + updatedAt: "2026-03-01T00:00:00.000Z", + ...overrides, + }; +} + +function createConfig(): ResolvedWorkflowConfig { + return { + workflowPath: "/tmp/WORKFLOW.md", + promptTemplate: "Prompt", + tracker: { + kind: "linear", + endpoint: "https://api.linear.app/graphql", + apiKey: "token", + projectSlug: "project", + activeStates: ["Todo", "In Progress", "In Review"], + terminalStates: ["Done", "Canceled"], + }, + polling: { + intervalMs: 30_000, + }, + workspace: { + root: "/tmp/workspaces", + }, + hooks: { + afterCreate: null, + 
beforeRun: null, + afterRun: null, + beforeRemove: null, + timeoutMs: 30_000, + }, + agent: { + maxConcurrentAgents: 2, + maxTurns: 5, + maxRetryBackoffMs: 300_000, + maxRetryAttempts: 5, + maxConcurrentAgentsByState: {}, + }, + codex: { + command: "codex-app-server", approvalPolicy: "never", threadSandbox: null, turnSandboxPolicy: null, @@ -425,11 +1962,102 @@ function createConfig(): ResolvedWorkflowConfig { }, server: { port: null, + slackNotifyChannel: null, }, observability: { dashboardEnabled: true, refreshMs: 1_000, renderIntervalMs: 16, }, + runner: { + kind: "codex", + model: null, + }, + stages: null, + escalationState: null, + }; +} + +function createStagedConfig( + overrides?: Partial<ResolvedWorkflowConfig>, +): ResolvedWorkflowConfig { + return { + ...createConfig(), + stages: { + initialStage: "investigate", + fastTrack: null, + stages: { + investigate: { + type: "agent", + runner: null, + model: null, + prompt: null, + maxTurns: null, + timeoutMs: null, + concurrency: null, + gateType: null, + maxRework: null, + reviewers: [], + transitions: { + onComplete: null, + onApprove: null, + onRework: null, + }, + linearState: null, + }, + }, + }, + ...overrides, + }; +} + +function createNormalResult(): AgentRunResult { + return { + issue: createIssue({ state: "In Progress" }), + workspace: { + path: "/tmp/workspaces/1", + workspaceKey: "1", + createdNow: true, + }, + runAttempt: { + issueId: "1", + issueIdentifier: "ISSUE-1", + attempt: null, + workspacePath: "/tmp/workspaces/1", + startedAt: "2026-03-06T00:00:00.000Z", + status: "succeeded", + }, + liveSession: { + sessionId: "thread-1-turn-1", + threadId: "thread-1", + turnId: "turn-1", + codexAppServerPid: "1001", + lastCodexEvent: "turn_completed", + lastCodexTimestamp: "2026-03-06T00:00:02.000Z", + lastCodexMessage: "done", + codexInputTokens: 100, + codexOutputTokens: 50, + codexTotalTokens: 150, + codexCacheReadTokens: 0, + codexCacheWriteTokens: 0, + codexNoCacheTokens: 0, + codexReasoningTokens: 
0, + codexTotalInputTokens: 100, + codexTotalOutputTokens: 50, + lastReportedInputTokens: 100, + lastReportedOutputTokens: 50, + lastReportedTotalTokens: 150, + turnCount: 1, + totalStageInputTokens: 0, + totalStageOutputTokens: 0, + totalStageTotalTokens: 0, + totalStageCacheReadTokens: 0, + totalStageCacheWriteTokens: 0, + turnHistory: [], + recentActivity: [], + }, + turnsCompleted: 1, + lastTurn: null, + rateLimits: null, }; } diff --git a/tests/orchestrator/stages.test.ts b/tests/orchestrator/stages.test.ts new file mode 100644 index 00000000..d82a82f8 --- /dev/null +++ b/tests/orchestrator/stages.test.ts @@ -0,0 +1,1166 @@ +import { describe, expect, it, vi } from "vitest"; + +import type { + ResolvedWorkflowConfig, + StageDefinition, + StagesConfig, +} from "../../src/config/types.js"; +import type { Issue } from "../../src/domain/model.js"; +import { + OrchestratorCore, + type OrchestratorCoreOptions, +} from "../../src/orchestrator/core.js"; +import type { EnsembleGateResult } from "../../src/orchestrator/gate-handler.js"; +import type { IssueTracker } from "../../src/tracker/tracker.js"; + +describe("orchestrator stage machine", () => { + it("dispatches with stage info when stages are configured", async () => { + const spawnCalls: Array<{ + stageName: string | null; + stageType: string | null; + }> = []; + const orchestrator = createStagedOrchestrator({ + onSpawn: (input) => { + spawnCalls.push({ + stageName: input.stageName, + stageType: input.stage?.type ?? 
null, + }); + }, + }); + + await orchestrator.pollTick(); + + expect(spawnCalls).toEqual([ + { stageName: "investigate", stageType: "agent" }, + ]); + expect(orchestrator.getState().issueStages["1"]).toBe("investigate"); + }); + + it("advances to next stage on normal worker exit", async () => { + const orchestrator = createStagedOrchestrator(); + + await orchestrator.pollTick(); + expect(orchestrator.getState().issueStages["1"]).toBe("investigate"); + + // Normal exit from investigate stage + orchestrator.onWorkerExit({ + issueId: "1", + outcome: "normal", + }); + + // Should advance to "implement" + expect(orchestrator.getState().issueStages["1"]).toBe("implement"); + }); + + it("completes when reaching terminal stage", async () => { + const orchestrator = createStagedOrchestrator({ + stages: createSimpleTwoStageConfig(), + }); + + await orchestrator.pollTick(); + expect(orchestrator.getState().issueStages["1"]).toBe("implement"); + + // Normal exit advances to "done" (terminal) + const retryEntry = orchestrator.onWorkerExit({ + issueId: "1", + outcome: "normal", + }); + + // Should be completed — no retry scheduled, stage cleaned up + expect(retryEntry).toBeNull(); + expect(orchestrator.getState().issueStages["1"]).toBeUndefined(); + expect(orchestrator.getState().completed.has("1")).toBe(true); + }); + + it("does not dispatch workers for gate stages", async () => { + const spawnCalls: unknown[] = []; + const orchestrator = createStagedOrchestrator({ + stages: createGateWorkflowConfig(), + onSpawn: () => { + spawnCalls.push(true); + }, + }); + + // First dispatch puts issue in "implement" (agent stage) + await orchestrator.pollTick(); + expect(spawnCalls).toHaveLength(1); + + // Normal exit advances to "review" (gate stage) + orchestrator.onWorkerExit({ + issueId: "1", + outcome: "normal", + }); + expect(orchestrator.getState().issueStages["1"]).toBe("review"); + + // Retry timer fires — should try to dispatch but gate stage blocks it + const retryResult = await 
orchestrator.onRetryTimer("1"); + // Gate stages don't spawn workers + expect(retryResult.dispatched).toBe(false); + }); + + it("approves a gate stage and advances to on_approve target", async () => { + const orchestrator = createStagedOrchestrator({ + stages: createGateWorkflowConfig(), + }); + + await orchestrator.pollTick(); + orchestrator.onWorkerExit({ issueId: "1", outcome: "normal" }); + expect(orchestrator.getState().issueStages["1"]).toBe("review"); + + // Approve the gate + const nextStage = orchestrator.approveGate("1"); + expect(nextStage).toBe("merge"); + expect(orchestrator.getState().issueStages["1"]).toBe("merge"); + }); + + it("reworks a gate stage and sends issue back to rework target", async () => { + const orchestrator = createStagedOrchestrator({ + stages: createGateWorkflowConfig(), + }); + + await orchestrator.pollTick(); + orchestrator.onWorkerExit({ issueId: "1", outcome: "normal" }); + expect(orchestrator.getState().issueStages["1"]).toBe("review"); + + // Reject (rework) the gate + const reworkTarget = orchestrator.reworkGate("1"); + expect(reworkTarget).toBe("implement"); + expect(orchestrator.getState().issueStages["1"]).toBe("implement"); + expect(orchestrator.getState().issueReworkCounts["1"]).toBe(1); + }); + + it("escalates when rework count exceeds max_rework limit", async () => { + const base = createGateWorkflowConfig(); + const stages: StagesConfig = { + ...base, + stages: { + ...base.stages, + review: { ...base.stages.review!, maxRework: 2 }, + }, + }; + + const orchestrator = createStagedOrchestrator({ stages }); + + await orchestrator.pollTick(); + orchestrator.onWorkerExit({ issueId: "1", outcome: "normal" }); + + // Rework 1 + orchestrator.reworkGate("1"); + expect(orchestrator.getState().issueReworkCounts["1"]).toBe(1); + + // Rework 2 + orchestrator.getState().issueStages["1"] = "review"; + orchestrator.reworkGate("1"); + expect(orchestrator.getState().issueReworkCounts["1"]).toBe(2); + + // Rework 3 — should escalate 
since max_rework = 2 + orchestrator.getState().issueStages["1"] = "review"; + const result = orchestrator.reworkGate("1"); + expect(result).toBe("escalated"); + expect(orchestrator.getState().issueStages["1"]).toBeUndefined(); + expect(orchestrator.getState().issueReworkCounts["1"]).toBeUndefined(); + expect(orchestrator.getState().failed.has("1")).toBe(true); + }); + + it("preserves flat dispatch behavior when no stages configured", async () => { + const spawnCalls: Array<{ + stageName: string | null; + stageType: string | null; + }> = []; + const orchestrator = createStagedOrchestrator({ + stages: null, + onSpawn: (input) => { + spawnCalls.push({ + stageName: input.stageName, + stageType: input.stage?.type ?? null, + }); + }, + }); + + await orchestrator.pollTick(); + + expect(spawnCalls).toEqual([{ stageName: null, stageType: null }]); + expect(orchestrator.getState().issueStages).toEqual({}); + }); + + it("flat dispatch normal exit still schedules continuation retry", async () => { + const orchestrator = createStagedOrchestrator({ stages: null }); + + await orchestrator.pollTick(); + const retryEntry = orchestrator.onWorkerExit({ + issueId: "1", + outcome: "normal", + }); + + expect(retryEntry).not.toBeNull(); + expect(retryEntry!.attempt).toBe(1); + expect(retryEntry!.error).toBeNull(); + }); + + it("tracks multiple issues in different stages independently", async () => { + const orchestrator = createStagedOrchestrator({ + candidates: [ + createIssue({ id: "1", identifier: "ISSUE-1" }), + createIssue({ id: "2", identifier: "ISSUE-2" }), + ], + }); + + await orchestrator.pollTick(); + expect(orchestrator.getState().issueStages["1"]).toBe("investigate"); + expect(orchestrator.getState().issueStages["2"]).toBe("investigate"); + + // Advance issue 1 only + orchestrator.onWorkerExit({ issueId: "1", outcome: "normal" }); + expect(orchestrator.getState().issueStages["1"]).toBe("implement"); + expect(orchestrator.getState().issueStages["2"]).toBe("investigate"); + }); 
+ + it("abnormal exit does not advance stage", async () => { + const orchestrator = createStagedOrchestrator(); + + await orchestrator.pollTick(); + expect(orchestrator.getState().issueStages["1"]).toBe("investigate"); + + orchestrator.onWorkerExit({ + issueId: "1", + outcome: "abnormal", + reason: "crashed", + }); + + // Stage should remain unchanged + expect(orchestrator.getState().issueStages["1"]).toBe("investigate"); + }); + + it("reworks an agent-type stage with onRework and sends issue back to rework target", async () => { + const orchestrator = createStagedOrchestrator({ + stages: createAgentReviewWorkflowConfig(), + }); + + await orchestrator.pollTick(); + orchestrator.onWorkerExit({ issueId: "1", outcome: "normal" }); + expect(orchestrator.getState().issueStages["1"]).toBe("review"); + + // Dispatch review agent + await orchestrator.onRetryTimer("1"); + + // Directly call reworkGate on an agent-type stage with onRework + const reworkTarget = orchestrator.reworkGate("1"); + expect(reworkTarget).toBe("implement"); + expect(orchestrator.getState().issueStages["1"]).toBe("implement"); + expect(orchestrator.getState().issueReworkCounts["1"]).toBe(1); + }); + + it("returns null from reworkGate for agent-type stage without onRework", async () => { + const orchestrator = createStagedOrchestrator(); + + await orchestrator.pollTick(); + expect(orchestrator.getState().issueStages["1"]).toBe("investigate"); + + // Investigate stage has no onRework — reworkGate should return null + const reworkTarget = orchestrator.reworkGate("1"); + expect(reworkTarget).toBeNull(); + expect(orchestrator.getState().issueStages["1"]).toBe("investigate"); + }); + + it("cleans up stage tracking when issue completes through terminal", async () => { + const orchestrator = createStagedOrchestrator({ + stages: createSimpleTwoStageConfig(), + }); + + await orchestrator.pollTick(); + + // Set a rework count to verify cleanup + orchestrator.getState().issueReworkCounts["1"] = 2; + + 
orchestrator.onWorkerExit({ issueId: "1", outcome: "normal" }); + + expect(orchestrator.getState().issueStages["1"]).toBeUndefined(); + expect(orchestrator.getState().issueReworkCounts["1"]).toBeUndefined(); + }); +}); + +describe("updateIssueState integration", () => { + it("calls updateIssueState when dispatching an agent stage with linearState", async () => { + const updateIssueState = vi.fn().mockResolvedValue(undefined); + const stages = createThreeStageConfigWithLinearStates(); + + const orchestrator = createStagedOrchestrator({ + stages, + updateIssueState, + }); + + await orchestrator.pollTick(); + + expect(updateIssueState).toHaveBeenCalledWith( + "1", + "ISSUE-1", + "In Progress", + ); + }); + + it("does not call updateIssueState when stage has null linearState", async () => { + const updateIssueState = vi.fn().mockResolvedValue(undefined); + + const orchestrator = createStagedOrchestrator({ + stages: createThreeStageConfig(), + updateIssueState, + }); + + await orchestrator.pollTick(); + + expect(updateIssueState).not.toHaveBeenCalled(); + }); + + it("calls updateIssueState when dispatching a gate stage with linearState", async () => { + const updateIssueState = vi.fn().mockResolvedValue(undefined); + const stages = createGateWorkflowConfigWithLinearStates(); + + const orchestrator = createStagedOrchestrator({ + stages, + updateIssueState, + }); + + // First dispatch puts issue in "implement" (agent stage with linearState) + await orchestrator.pollTick(); + expect(updateIssueState).toHaveBeenCalledWith( + "1", + "ISSUE-1", + "In Progress", + ); + + // Normal exit advances to "review" (gate stage) + orchestrator.onWorkerExit({ issueId: "1", outcome: "normal" }); + expect(orchestrator.getState().issueStages["1"]).toBe("review"); + + // Retry timer fires — gate stage dispatch should call updateIssueState with "In Review" + const retryResult = await orchestrator.onRetryTimer("1"); + expect(retryResult.dispatched).toBe(false); + 
expect(updateIssueState).toHaveBeenCalledWith("1", "ISSUE-1", "In Review"); + }); + + it("calls updateIssueState on escalation when escalationState is configured", async () => { + const updateIssueState = vi.fn().mockResolvedValue(undefined); + const runEnsembleGate = vi.fn().mockResolvedValue({ + aggregate: "fail", + results: [], + comment: "Code quality issues found.", + } satisfies EnsembleGateResult); + + const base = createGateWorkflowConfigWithLinearStates(); + const stages: StagesConfig = { + ...base, + stages: { + ...base.stages, + review: { ...base.stages.review!, maxRework: 0 }, + }, + }; + + const orchestrator = createStagedOrchestrator({ + stages, + escalationState: "Blocked", + updateIssueState, + runEnsembleGate, + }); + + await orchestrator.pollTick(); + orchestrator.onWorkerExit({ issueId: "1", outcome: "normal" }); + + // Retry timer fires — gate stage runs ensemble gate which fails → escalates + await orchestrator.onRetryTimer("1"); + // Wait for the async handleEnsembleGate to complete + await new Promise((resolve) => setTimeout(resolve, 50)); + + expect(updateIssueState).toHaveBeenCalledWith("1", "ISSUE-1", "Blocked"); + }); + + it("does not call updateIssueState on escalation when escalationState is null", async () => { + const updateIssueState = vi.fn().mockResolvedValue(undefined); + const runEnsembleGate = vi.fn().mockResolvedValue({ + aggregate: "fail", + results: [], + comment: "Code quality issues found.", + } satisfies EnsembleGateResult); + + const base = createGateWorkflowConfigWithLinearStates(); + const stages: StagesConfig = { + ...base, + stages: { + ...base.stages, + review: { ...base.stages.review!, maxRework: 0 }, + }, + }; + + const orchestrator = createStagedOrchestrator({ + stages, + escalationState: null, + updateIssueState, + runEnsembleGate, + }); + + await orchestrator.pollTick(); + orchestrator.onWorkerExit({ issueId: "1", outcome: "normal" }); + + await orchestrator.onRetryTimer("1"); + await new Promise((resolve) => 
setTimeout(resolve, 50)); + + // Only called for dispatch linearStates, not for escalation + const escalationCalls = updateIssueState.mock.calls.filter( + (call: unknown[]) => call[2] === "Blocked", + ); + expect(escalationCalls).toHaveLength(0); + }); + + it("still dispatches successfully if updateIssueState throws", async () => { + const updateIssueState = vi + .fn() + .mockRejectedValue(new Error("Linear API down")); + + const orchestrator = createStagedOrchestrator({ + stages: createThreeStageConfigWithLinearStates(), + updateIssueState, + }); + + const result = await orchestrator.pollTick(); + + // Dispatch should succeed despite updateIssueState failure + expect(result.dispatchedIssueIds).toEqual(["1"]); + expect(Object.keys(orchestrator.getState().running)).toEqual(["1"]); + expect(updateIssueState).toHaveBeenCalledWith( + "1", + "ISSUE-1", + "In Progress", + ); + }); + + it("calls updateIssueState with terminal stage linearState when issue reaches terminal", async () => { + const updateIssueState = vi.fn().mockResolvedValue(undefined); + + const orchestrator = createStagedOrchestrator({ + stages: createTwoStageConfigWithTerminalLinearState(), + updateIssueState, + }); + + await orchestrator.pollTick(); + + // Normal exit from implement → done (terminal with linearState "Done") + orchestrator.onWorkerExit({ issueId: "1", outcome: "normal" }); + + // Wait for the async updateIssueState call to complete + await new Promise((resolve) => setTimeout(resolve, 50)); + + expect(orchestrator.getState().completed.has("1")).toBe(true); + expect(orchestrator.getState().issueStages["1"]).toBeUndefined(); + // Should have been called twice: once for dispatch ("In Progress") and once for terminal ("Done") + expect(updateIssueState).toHaveBeenCalledWith( + "1", + "ISSUE-1", + "In Progress", + ); + expect(updateIssueState).toHaveBeenCalledWith("1", "ISSUE-1", "Done"); + }); + + it("does not call updateIssueState when terminal stage has null linearState", async () => { + 
const updateIssueState = vi.fn().mockResolvedValue(undefined); + + const orchestrator = createStagedOrchestrator({ + stages: createSimpleTwoStageConfig(), + updateIssueState, + }); + + await orchestrator.pollTick(); + updateIssueState.mockClear(); + + // Normal exit from implement → done (terminal with no linearState) + orchestrator.onWorkerExit({ issueId: "1", outcome: "normal" }); + + await new Promise((resolve) => setTimeout(resolve, 50)); + + expect(orchestrator.getState().completed.has("1")).toBe(true); + // updateIssueState should NOT have been called for the terminal stage + expect(updateIssueState).not.toHaveBeenCalled(); + }); + + it("calls updateIssueState when gate approves to terminal stage with linearState", async () => { + const updateIssueState = vi.fn().mockResolvedValue(undefined); + + const orchestrator = createStagedOrchestrator({ + stages: createGateToTerminalConfigWithLinearState(), + updateIssueState, + }); + + await orchestrator.pollTick(); + orchestrator.onWorkerExit({ issueId: "1", outcome: "normal" }); + expect(orchestrator.getState().issueStages["1"]).toBe("review"); + + // Approve the gate — sets issue to "done" (terminal with linearState "Done") + const nextStage = orchestrator.approveGate("1"); + expect(nextStage).toBe("done"); + expect(orchestrator.getState().issueStages["1"]).toBe("done"); + + // Trigger the continuation so dispatchIssue hits the terminal short-circuit + const retryResult = await orchestrator.onRetryTimer("1"); + expect(retryResult.dispatched).toBe(false); + + // Wait for the async updateIssueState call to complete + await new Promise((resolve) => setTimeout(resolve, 50)); + + // Should have been called twice: once for dispatch ("In Progress") and once for terminal ("Done") + expect(orchestrator.getState().completed.has("1")).toBe(true); + expect(updateIssueState).toHaveBeenCalledWith( + "1", + "ISSUE-1", + "In Progress", + ); + expect(updateIssueState).toHaveBeenCalledWith("1", "ISSUE-1", "Done"); + }); +}); + +// 
--- Helpers --- + +function createStagedOrchestrator(overrides?: { + stages?: StagesConfig | null; + candidates?: Issue[]; + escalationState?: string | null; + updateIssueState?: OrchestratorCoreOptions["updateIssueState"]; + runEnsembleGate?: OrchestratorCoreOptions["runEnsembleGate"]; + postComment?: OrchestratorCoreOptions["postComment"]; + onSpawn?: (input: { + issue: Issue; + attempt: number | null; + stage: StageDefinition | null; + stageName: string | null; + }) => void; +}) { + const stages = + overrides?.stages !== undefined + ? overrides.stages + : createThreeStageConfig(); + + const tracker = createTracker({ + candidates: overrides?.candidates ?? [ + createIssue({ id: "1", identifier: "ISSUE-1" }), + ], + }); + + const options: OrchestratorCoreOptions = { + config: createConfig({ + stages, + ...(overrides?.escalationState !== undefined + ? { escalationState: overrides.escalationState } + : {}), + }), + tracker, + spawnWorker: async (input) => { + overrides?.onSpawn?.(input); + return { + workerHandle: { pid: 1001 }, + monitorHandle: { ref: "monitor-1" }, + }; + }, + now: () => new Date("2026-03-06T00:00:05.000Z"), + ...(overrides?.updateIssueState !== undefined + ? { updateIssueState: overrides.updateIssueState } + : {}), + ...(overrides?.runEnsembleGate !== undefined + ? { runEnsembleGate: overrides.runEnsembleGate } + : {}), + ...(overrides?.postComment !== undefined + ? 
{ postComment: overrides.postComment } + : {}), + }; + + return new OrchestratorCore(options); +} + +function createThreeStageConfig(): StagesConfig { + return { + initialStage: "investigate", + fastTrack: null, + stages: { + investigate: { + type: "agent", + runner: "claude-code", + model: "claude-opus-4", + prompt: "investigate.liquid", + maxTurns: 8, + timeoutMs: null, + concurrency: null, + gateType: null, + maxRework: null, + reviewers: [], + transitions: { + onComplete: "implement", + onApprove: null, + onRework: null, + }, + linearState: null, + }, + implement: { + type: "agent", + runner: "claude-code", + model: "claude-sonnet-4-5", + prompt: "implement.liquid", + maxTurns: 30, + timeoutMs: null, + concurrency: null, + gateType: null, + maxRework: null, + reviewers: [], + transitions: { + onComplete: "done", + onApprove: null, + onRework: null, + }, + linearState: null, + }, + done: { + type: "terminal", + runner: null, + model: null, + prompt: null, + maxTurns: null, + timeoutMs: null, + concurrency: null, + gateType: null, + maxRework: null, + reviewers: [], + transitions: { onComplete: null, onApprove: null, onRework: null }, + linearState: null, + }, + }, + }; +} + +function createSimpleTwoStageConfig(): StagesConfig { + return { + initialStage: "implement", + fastTrack: null, + stages: { + implement: { + type: "agent", + runner: "claude-code", + model: "claude-sonnet-4-5", + prompt: "implement.liquid", + maxTurns: 30, + timeoutMs: null, + concurrency: null, + gateType: null, + maxRework: null, + reviewers: [], + transitions: { + onComplete: "done", + onApprove: null, + onRework: null, + }, + linearState: null, + }, + done: { + type: "terminal", + runner: null, + model: null, + prompt: null, + maxTurns: null, + timeoutMs: null, + concurrency: null, + gateType: null, + maxRework: null, + reviewers: [], + transitions: { onComplete: null, onApprove: null, onRework: null }, + linearState: null, + }, + }, + }; +} + +function 
createTwoStageConfigWithTerminalLinearState(): StagesConfig { + return { + initialStage: "implement", + fastTrack: null, + stages: { + implement: { + type: "agent", + runner: "claude-code", + model: "claude-sonnet-4-5", + prompt: "implement.liquid", + maxTurns: 30, + timeoutMs: null, + concurrency: null, + gateType: null, + maxRework: null, + reviewers: [], + transitions: { + onComplete: "done", + onApprove: null, + onRework: null, + }, + linearState: "In Progress", + }, + done: { + type: "terminal", + runner: null, + model: null, + prompt: null, + maxTurns: null, + timeoutMs: null, + concurrency: null, + gateType: null, + maxRework: null, + reviewers: [], + transitions: { onComplete: null, onApprove: null, onRework: null }, + linearState: "Done", + }, + }, + }; +} + +function createThreeStageConfigWithLinearStates(): StagesConfig { + return { + initialStage: "investigate", + fastTrack: null, + stages: { + investigate: { + type: "agent", + runner: "claude-code", + model: "claude-opus-4", + prompt: "investigate.liquid", + maxTurns: 8, + timeoutMs: null, + concurrency: null, + gateType: null, + maxRework: null, + reviewers: [], + transitions: { + onComplete: "implement", + onApprove: null, + onRework: null, + }, + linearState: "In Progress", + }, + implement: { + type: "agent", + runner: "claude-code", + model: "claude-sonnet-4-5", + prompt: "implement.liquid", + maxTurns: 30, + timeoutMs: null, + concurrency: null, + gateType: null, + maxRework: null, + reviewers: [], + transitions: { + onComplete: "done", + onApprove: null, + onRework: null, + }, + linearState: "In Progress", + }, + done: { + type: "terminal", + runner: null, + model: null, + prompt: null, + maxTurns: null, + timeoutMs: null, + concurrency: null, + gateType: null, + maxRework: null, + reviewers: [], + transitions: { onComplete: null, onApprove: null, onRework: null }, + linearState: null, + }, + }, + }; +} + +function createGateWorkflowConfigWithLinearStates(): StagesConfig { + return { + 
initialStage: "implement", + fastTrack: null, + stages: { + implement: { + type: "agent", + runner: "claude-code", + model: "claude-sonnet-4-5", + prompt: "implement.liquid", + maxTurns: 30, + timeoutMs: null, + concurrency: null, + gateType: null, + maxRework: null, + reviewers: [], + transitions: { + onComplete: "review", + onApprove: null, + onRework: null, + }, + linearState: "In Progress", + }, + review: { + type: "gate", + runner: null, + model: null, + prompt: null, + maxTurns: null, + timeoutMs: null, + concurrency: null, + gateType: "ensemble", + maxRework: 3, + reviewers: [], + transitions: { + onComplete: null, + onApprove: "merge", + onRework: "implement", + }, + linearState: "In Review", + }, + merge: { + type: "agent", + runner: "claude-code", + model: "claude-sonnet-4-5", + prompt: "merge.liquid", + maxTurns: 5, + timeoutMs: null, + concurrency: null, + gateType: null, + maxRework: null, + reviewers: [], + transitions: { + onComplete: "done", + onApprove: null, + onRework: null, + }, + linearState: null, + }, + done: { + type: "terminal", + runner: null, + model: null, + prompt: null, + maxTurns: null, + timeoutMs: null, + concurrency: null, + gateType: null, + maxRework: null, + reviewers: [], + transitions: { onComplete: null, onApprove: null, onRework: null }, + linearState: null, + }, + }, + }; +} + +function createGateWorkflowConfig(): StagesConfig { + return { + initialStage: "implement", + fastTrack: null, + stages: { + implement: { + type: "agent", + runner: "claude-code", + model: "claude-sonnet-4-5", + prompt: "implement.liquid", + maxTurns: 30, + timeoutMs: null, + concurrency: null, + gateType: null, + maxRework: null, + reviewers: [], + transitions: { + onComplete: "review", + onApprove: null, + onRework: null, + }, + linearState: null, + }, + review: { + type: "gate", + runner: null, + model: null, + prompt: null, + maxTurns: null, + timeoutMs: null, + concurrency: null, + gateType: "ensemble", + maxRework: 3, + reviewers: [], + 
transitions: { + onComplete: null, + onApprove: "merge", + onRework: "implement", + }, + linearState: null, + }, + merge: { + type: "agent", + runner: "claude-code", + model: "claude-sonnet-4-5", + prompt: "merge.liquid", + maxTurns: 5, + timeoutMs: null, + concurrency: null, + gateType: null, + maxRework: null, + reviewers: [], + transitions: { + onComplete: "done", + onApprove: null, + onRework: null, + }, + linearState: null, + }, + done: { + type: "terminal", + runner: null, + model: null, + prompt: null, + maxTurns: null, + timeoutMs: null, + concurrency: null, + gateType: null, + maxRework: null, + reviewers: [], + transitions: { onComplete: null, onApprove: null, onRework: null }, + linearState: null, + }, + }, + }; +} + +function createGateToTerminalConfigWithLinearState(): StagesConfig { + return { + initialStage: "implement", + fastTrack: null, + stages: { + implement: { + type: "agent", + runner: "claude-code", + model: "claude-sonnet-4-5", + prompt: "implement.liquid", + maxTurns: 30, + timeoutMs: null, + concurrency: null, + gateType: null, + maxRework: null, + reviewers: [], + transitions: { + onComplete: "review", + onApprove: null, + onRework: null, + }, + linearState: "In Progress", + }, + review: { + type: "gate", + runner: null, + model: null, + prompt: null, + maxTurns: null, + timeoutMs: null, + concurrency: null, + gateType: "ensemble", + maxRework: 3, + reviewers: [], + transitions: { + onComplete: null, + onApprove: "done", + onRework: "implement", + }, + linearState: "In Review", + }, + done: { + type: "terminal", + runner: null, + model: null, + prompt: null, + maxTurns: null, + timeoutMs: null, + concurrency: null, + gateType: null, + maxRework: null, + reviewers: [], + transitions: { onComplete: null, onApprove: null, onRework: null }, + linearState: "Done", + }, + }, + }; +} + +function createAgentReviewWorkflowConfig(): StagesConfig { + return { + initialStage: "implement", + fastTrack: null, + stages: { + implement: { + type: "agent", 
+ runner: "claude-code", + model: "claude-sonnet-4-5", + prompt: "implement.liquid", + maxTurns: 30, + timeoutMs: null, + concurrency: null, + gateType: null, + maxRework: null, + reviewers: [], + transitions: { + onComplete: "review", + onApprove: null, + onRework: null, + }, + linearState: null, + }, + review: { + type: "agent", + runner: "claude-code", + model: "claude-opus-4-6", + prompt: "review.liquid", + maxTurns: 15, + timeoutMs: null, + concurrency: null, + gateType: null, + maxRework: 3, + reviewers: [], + transitions: { + onComplete: "merge", + onApprove: null, + onRework: "implement", + }, + linearState: null, + }, + merge: { + type: "agent", + runner: "claude-code", + model: "claude-sonnet-4-5", + prompt: "merge.liquid", + maxTurns: 5, + timeoutMs: null, + concurrency: null, + gateType: null, + maxRework: null, + reviewers: [], + transitions: { + onComplete: "done", + onApprove: null, + onRework: null, + }, + linearState: null, + }, + done: { + type: "terminal", + runner: null, + model: null, + prompt: null, + maxTurns: null, + timeoutMs: null, + concurrency: null, + gateType: null, + maxRework: null, + reviewers: [], + transitions: { onComplete: null, onApprove: null, onRework: null }, + linearState: null, + }, + }, + }; +} + +function createTracker(input?: { + candidates?: Issue[]; +}): IssueTracker { + return { + async fetchCandidateIssues() { + return ( + input?.candidates ?? [createIssue({ id: "1", identifier: "ISSUE-1" })] + ); + }, + async fetchIssuesByStates() { + return []; + }, + async fetchIssueStatesByIds() { + return ( + input?.candidates?.map((issue) => ({ + id: issue.id, + identifier: issue.identifier, + state: issue.state, + })) ?? 
[{ id: "1", identifier: "ISSUE-1", state: "In Progress" }] + ); + }, + }; +} + +function createConfig(overrides?: { + stages?: StagesConfig | null; + escalationState?: string | null; +}): ResolvedWorkflowConfig { + return { + workflowPath: "/tmp/WORKFLOW.md", + promptTemplate: "Prompt", + tracker: { + kind: "linear", + endpoint: "https://api.linear.app/graphql", + apiKey: "token", + projectSlug: "project", + activeStates: ["Todo", "In Progress", "In Review"], + terminalStates: ["Done", "Canceled"], + }, + polling: { + intervalMs: 30_000, + }, + workspace: { + root: "/tmp/workspaces", + }, + hooks: { + afterCreate: null, + beforeRun: null, + afterRun: null, + beforeRemove: null, + timeoutMs: 30_000, + }, + agent: { + maxConcurrentAgents: 2, + maxTurns: 5, + maxRetryBackoffMs: 300_000, + maxRetryAttempts: 5, + maxConcurrentAgentsByState: {}, + }, + runner: { + kind: "codex", + model: null, + }, + codex: { + command: "codex-app-server", + approvalPolicy: "never", + threadSandbox: null, + turnSandboxPolicy: null, + turnTimeoutMs: 300_000, + readTimeoutMs: 30_000, + stallTimeoutMs: 300_000, + }, + server: { + port: null, + slackNotifyChannel: null, + }, + observability: { + dashboardEnabled: true, + refreshMs: 1_000, + renderIntervalMs: 16, + }, + stages: overrides?.stages !== undefined ? overrides.stages : null, + escalationState: overrides?.escalationState ?? null, + }; +} + +function createIssue(overrides?: Partial<Issue>): Issue { + return { + id: overrides?.id ?? "1", + identifier: overrides?.identifier ?? "ISSUE-1", + title: overrides?.title ?? "Example issue", + description: overrides?.description ?? null, + priority: overrides?.priority ?? 1, + state: overrides?.state ?? "In Progress", + branchName: overrides?.branchName ?? null, + url: overrides?.url ?? null, + labels: overrides?.labels ?? [], + blockedBy: overrides?.blockedBy ?? [], + createdAt: overrides?.createdAt ?? "2026-03-01T00:00:00.000Z", + updatedAt: overrides?.updatedAt ?? 
"2026-03-01T00:00:00.000Z", + }; +} diff --git a/tests/runners/claude-code-runner.test.ts b/tests/runners/claude-code-runner.test.ts new file mode 100644 index 00000000..da4c6e48 --- /dev/null +++ b/tests/runners/claude-code-runner.test.ts @@ -0,0 +1,668 @@ +import { afterEach, beforeEach, describe, expect, it, vi } from "vitest"; + +import type { CodexClientEvent } from "../../src/codex/app-server-client.js"; +import { + ClaudeCodeRunner, + resolveClaudeModelId, +} from "../../src/runners/claude-code-runner.js"; + +// Mock the AI SDK generateText +vi.mock("ai", () => ({ + generateText: vi.fn(), +})); + +vi.mock("ai-sdk-provider-claude-code", () => ({ + claudeCode: vi.fn(() => "mock-claude-model"), +})); + +// Mock node:fs for heartbeat tests +vi.mock("node:fs", () => ({ + statSync: vi.fn(() => ({ mtimeMs: 1000 })), +})); + +import { statSync } from "node:fs"; +import { generateText } from "ai"; +import { claudeCode } from "ai-sdk-provider-claude-code"; + +const mockGenerateText = vi.mocked(generateText); +const mockClaudeCode = vi.mocked(claudeCode); +const mockStatSync = vi.mocked(statSync); + +describe("ClaudeCodeRunner", () => { + it("implements AgentRunnerCodexClient interface (startSession, continueTurn, close)", () => { + const runner = new ClaudeCodeRunner({ + cwd: "/tmp/workspace", + model: "sonnet", + }); + + expect(typeof runner.startSession).toBe("function"); + expect(typeof runner.continueTurn).toBe("function"); + expect(typeof runner.close).toBe("function"); + }); + + it("calls generateText with claude-code model on startSession", async () => { + mockGenerateText.mockResolvedValueOnce({ + text: "Hello from Claude", + usage: { + inputTokens: 100, + outputTokens: 50, + totalTokens: 150, + inputTokenDetails: { + noCacheTokens: undefined, + cacheReadTokens: undefined, + cacheWriteTokens: undefined, + }, + outputTokenDetails: { + textTokens: undefined, + reasoningTokens: undefined, + }, + }, + } as never); + + const runner = new ClaudeCodeRunner({ + cwd: 
"/tmp/workspace", + model: "opus", + }); + + const result = await runner.startSession({ + prompt: "Fix the bug", + title: "ABC-123: Fix the bug", + }); + + expect(mockClaudeCode).toHaveBeenCalledWith("opus", { + cwd: "/tmp/workspace", + permissionMode: "bypassPermissions", + }); + expect(mockGenerateText).toHaveBeenCalledWith( + expect.objectContaining({ + model: "mock-claude-model", + prompt: "Fix the bug", + }), + ); + expect(result.status).toBe("completed"); + expect(result.message).toBe("Hello from Claude"); + expect(result.usage).toEqual({ + inputTokens: 100, + outputTokens: 50, + totalTokens: 150, + }); + }); + + it("emits session_started and turn_completed events", async () => { + mockGenerateText.mockResolvedValueOnce({ + text: "Done", + usage: { + inputTokens: 10, + outputTokens: 5, + totalTokens: 15, + inputTokenDetails: { + noCacheTokens: undefined, + cacheReadTokens: undefined, + cacheWriteTokens: undefined, + }, + outputTokenDetails: { + textTokens: undefined, + reasoningTokens: undefined, + }, + }, + } as never); + + const events: CodexClientEvent[] = []; + const runner = new ClaudeCodeRunner({ + cwd: "/tmp/workspace", + model: "sonnet", + onEvent: (event) => events.push(event), + }); + + await runner.startSession({ prompt: "test", title: "test" }); + + expect(events).toHaveLength(2); + expect(events[0]!.event).toBe("session_started"); + expect(events[0]!.codexAppServerPid).toBeNull(); + expect(events[1]!.event).toBe("turn_completed"); + expect(events[1]!.usage).toEqual({ + inputTokens: 10, + outputTokens: 5, + totalTokens: 15, + }); + }); + + it("emits turn_failed on error and returns failed status", async () => { + mockGenerateText.mockRejectedValueOnce(new Error("Rate limit exceeded")); + + const events: CodexClientEvent[] = []; + const runner = new ClaudeCodeRunner({ + cwd: "/tmp/workspace", + model: "sonnet", + onEvent: (event) => events.push(event), + }); + + const result = await runner.startSession({ + prompt: "test", + title: "test", + }); + + 
expect(result.status).toBe("failed"); + expect(result.message).toBe("Rate limit exceeded"); + expect(result.usage).toBeNull(); + expect(events.map((e) => e.event)).toEqual([ + "session_started", + "turn_failed", + ]); + }); + + it("increments turn count across startSession and continueTurn", async () => { + const mockResult = { + text: "ok", + usage: { + inputTokens: 10, + outputTokens: 5, + totalTokens: 15, + inputTokenDetails: { + noCacheTokens: undefined, + cacheReadTokens: undefined, + cacheWriteTokens: undefined, + }, + outputTokenDetails: { + textTokens: undefined, + reasoningTokens: undefined, + }, + }, + } as never; + mockGenerateText + .mockResolvedValueOnce(mockResult) + .mockResolvedValueOnce(mockResult); + + const runner = new ClaudeCodeRunner({ + cwd: "/tmp/workspace", + model: "sonnet", + }); + + const first = await runner.startSession({ prompt: "p1", title: "t" }); + const second = await runner.continueTurn("p2", "t"); + + expect(first.turnId).toBe("turn-1"); + expect(second.turnId).toBe("turn-2"); + // Session IDs share the same thread + expect(first.threadId).toBe(second.threadId); + }); + + it("handles undefined token values from AI SDK gracefully", async () => { + mockGenerateText.mockResolvedValueOnce({ + text: "result", + usage: { + inputTokens: undefined, + outputTokens: undefined, + totalTokens: undefined, + inputTokenDetails: { + noCacheTokens: undefined, + cacheReadTokens: undefined, + cacheWriteTokens: undefined, + }, + outputTokenDetails: { + textTokens: undefined, + reasoningTokens: undefined, + }, + }, + } as never); + + const runner = new ClaudeCodeRunner({ + cwd: "/tmp/workspace", + model: "sonnet", + }); + + const result = await runner.startSession({ prompt: "p", title: "t" }); + expect(result.usage).toEqual({ + inputTokens: 0, + outputTokens: 0, + totalTokens: 0, + }); + // detail fields should be absent (not 0) when provider doesn't report them + expect(result.usage?.cacheReadTokens).toBeUndefined(); + 
expect(result.usage?.cacheWriteTokens).toBeUndefined(); + expect(result.usage?.noCacheTokens).toBeUndefined(); + expect(result.usage?.reasoningTokens).toBeUndefined(); + }); + + it("extracts cache and reasoning token details from inputTokenDetails / outputTokenDetails", async () => { + mockGenerateText.mockResolvedValueOnce({ + text: "result", + usage: { + inputTokens: 100, + outputTokens: 50, + totalTokens: 150, + inputTokenDetails: { + cacheReadTokens: 20, + cacheWriteTokens: 10, + noCacheTokens: 70, + }, + outputTokenDetails: { + textTokens: 40, + reasoningTokens: 10, + }, + }, + } as never); + + const runner = new ClaudeCodeRunner({ + cwd: "/tmp/workspace", + model: "sonnet", + }); + + const result = await runner.startSession({ prompt: "p", title: "t" }); + expect(result.usage?.cacheReadTokens).toBe(20); + expect(result.usage?.cacheWriteTokens).toBe(10); + expect(result.usage?.noCacheTokens).toBe(70); + expect(result.usage?.reasoningTokens).toBe(10); + }); + + it("maps full Anthropic model IDs to short provider names", async () => { + mockGenerateText.mockResolvedValueOnce({ + text: "ok", + usage: { + inputTokens: 10, + outputTokens: 5, + totalTokens: 15, + inputTokenDetails: { + noCacheTokens: undefined, + cacheReadTokens: undefined, + cacheWriteTokens: undefined, + }, + outputTokenDetails: { + textTokens: undefined, + reasoningTokens: undefined, + }, + }, + } as never); + + const runner = new ClaudeCodeRunner({ + cwd: "/tmp/workspace", + model: "claude-sonnet-4-5", + }); + + await runner.startSession({ prompt: "test", title: "test" }); + + // Should resolve "claude-sonnet-4-5" → "sonnet" + expect(mockClaudeCode).toHaveBeenCalledWith("sonnet", { + cwd: "/tmp/workspace", + permissionMode: "bypassPermissions", + }); + }); + + it("passes abortSignal to generateText for subprocess cleanup", async () => { + mockGenerateText.mockResolvedValueOnce({ + text: "ok", + usage: { + inputTokens: 10, + outputTokens: 5, + totalTokens: 15, + inputTokenDetails: { + 
noCacheTokens: undefined, + cacheReadTokens: undefined, + cacheWriteTokens: undefined, + }, + outputTokenDetails: { + textTokens: undefined, + reasoningTokens: undefined, + }, + }, + } as never); + + const runner = new ClaudeCodeRunner({ + cwd: "/tmp/workspace", + model: "sonnet", + }); + + await runner.startSession({ prompt: "test", title: "test" }); + + const callArgs = mockGenerateText.mock.calls[0]![0]!; + expect(callArgs).toHaveProperty("abortSignal"); + expect(callArgs.abortSignal).toBeInstanceOf(AbortSignal); + }); + + it("aborts in-flight turn when close() is called", async () => { + // Create a controllable promise to simulate a long-running turn + let rejectFn: (reason: unknown) => void; + mockGenerateText.mockReturnValueOnce( + new Promise((_resolve, reject) => { + rejectFn = reject; + }) as never, + ); + + const runner = new ClaudeCodeRunner({ + cwd: "/tmp/workspace", + model: "sonnet", + }); + + // Start a turn but don't await — the async function runs synchronously + // up to the first await (generateText), setting activeTurnController + const turnPromise = runner.startSession({ + prompt: "long task", + title: "test", + }); + + // The activeTurnController should be set synchronously before the await + // Access the private field to get the controller directly + const controller = ( + runner as unknown as { activeTurnController: AbortController | null } + ).activeTurnController; + expect(controller).not.toBeNull(); + expect(controller!.signal.aborted).toBe(false); + + // Close the runner — should abort the in-flight controller + await runner.close(); + expect(controller!.signal.aborted).toBe(true); + + // Reject the mock so the turn settles + rejectFn!(new Error("aborted")); + const result = await turnPromise; + expect(result.status).toBe("failed"); + }); +}); + +describe("ClaudeCodeRunner heartbeat", () => { + // Path-aware mtime tracking for heartbeat tests. + // The heartbeat polls both .git/index and the workspace root dir. 
+ let mtimeByPath: Record<string, number>; + + beforeEach(() => { + vi.useFakeTimers(); + mtimeByPath = { + "/tmp/workspace/.git/index": 1000, + "/tmp/workspace": 1000, + }; + mockStatSync.mockImplementation((p: unknown) => { + const key = String(p); + return { mtimeMs: mtimeByPath[key] ?? 0 } as never; + }); + }); + + afterEach(() => { + vi.useRealTimers(); + }); + + it("emits activity_heartbeat when git index mtime changes during execution", async () => { + let resolveFn: (value: unknown) => void; + mockGenerateText.mockReturnValueOnce( + new Promise((resolve) => { + resolveFn = resolve; + }) as never, + ); + + const events: CodexClientEvent[] = []; + const runner = new ClaudeCodeRunner({ + cwd: "/tmp/workspace", + model: "sonnet", + onEvent: (event) => events.push(event), + heartbeatIntervalMs: 5000, + }); + + const turnPromise = runner.startSession({ + prompt: "long task", + title: "test", + }); + + // Initial poll — no change, no heartbeat + vi.advanceTimersByTime(5000); + expect(events.filter((e) => e.event === "activity_heartbeat")).toHaveLength( + 0, + ); + + // Simulate a git index change (only git, not workspace dir) + mtimeByPath["/tmp/workspace/.git/index"] = 2000; + vi.advanceTimersByTime(5000); + const heartbeats = events.filter((e) => e.event === "activity_heartbeat"); + expect(heartbeats).toHaveLength(1); + expect(heartbeats[0]!.message).toBe( + "workspace file change detected (git index)", + ); + + // Resolve the turn + resolveFn!({ + text: "done", + usage: { inputTokens: 10, outputTokens: 5, totalTokens: 15 }, + }); + await turnPromise; + }); + + it("emits activity_heartbeat when workspace dir mtime changes (non-git activity)", async () => { + let resolveFn: (value: unknown) => void; + mockGenerateText.mockReturnValueOnce( + new Promise((resolve) => { + resolveFn = resolve; + }) as never, + ); + + const events: CodexClientEvent[] = []; + const runner = new ClaudeCodeRunner({ + cwd: "/tmp/workspace", + model: "sonnet", + onEvent: (event) => 
events.push(event), + heartbeatIntervalMs: 5000, + }); + + const turnPromise = runner.startSession({ + prompt: "review task", + title: "test", + }); + + // Initial poll — no change + vi.advanceTimersByTime(5000); + expect(events.filter((e) => e.event === "activity_heartbeat")).toHaveLength( + 0, + ); + + // Simulate workspace dir change only (e.g. review agent creating temp file) + mtimeByPath["/tmp/workspace"] = 2000; + vi.advanceTimersByTime(5000); + const heartbeats = events.filter((e) => e.event === "activity_heartbeat"); + expect(heartbeats).toHaveLength(1); + expect(heartbeats[0]!.message).toBe( + "workspace file change detected (workspace dir)", + ); + + resolveFn!({ + text: "done", + usage: { inputTokens: 10, outputTokens: 5, totalTokens: 15 }, + }); + await turnPromise; + }); + + it("emits heartbeat indicating both sources when both change simultaneously", async () => { + let resolveFn: (value: unknown) => void; + mockGenerateText.mockReturnValueOnce( + new Promise((resolve) => { + resolveFn = resolve; + }) as never, + ); + + const events: CodexClientEvent[] = []; + const runner = new ClaudeCodeRunner({ + cwd: "/tmp/workspace", + model: "sonnet", + onEvent: (event) => events.push(event), + heartbeatIntervalMs: 5000, + }); + + const turnPromise = runner.startSession({ prompt: "task", title: "test" }); + + // Both change at same interval + mtimeByPath["/tmp/workspace/.git/index"] = 2000; + mtimeByPath["/tmp/workspace"] = 2000; + vi.advanceTimersByTime(5000); + const heartbeats = events.filter((e) => e.event === "activity_heartbeat"); + expect(heartbeats).toHaveLength(1); + expect(heartbeats[0]!.message).toBe( + "workspace file change detected (git index and workspace dir)", + ); + + resolveFn!({ + text: "done", + usage: { inputTokens: 10, outputTokens: 5, totalTokens: 15 }, + }); + await turnPromise; + }); + + it("does not emit heartbeat when neither mtime changes", async () => { + let resolveFn: (value: unknown) => void; + 
mockGenerateText.mockReturnValueOnce( + new Promise((resolve) => { + resolveFn = resolve; + }) as never, + ); + + const events: CodexClientEvent[] = []; + const runner = new ClaudeCodeRunner({ + cwd: "/tmp/workspace", + model: "sonnet", + onEvent: (event) => events.push(event), + heartbeatIntervalMs: 5000, + }); + + const turnPromise = runner.startSession({ prompt: "task", title: "test" }); + + // Advance through multiple intervals with no mtime change + vi.advanceTimersByTime(20000); + expect(events.filter((e) => e.event === "activity_heartbeat")).toHaveLength( + 0, + ); + + resolveFn!({ + text: "done", + usage: { inputTokens: 10, outputTokens: 5, totalTokens: 15 }, + }); + await turnPromise; + }); + + it("clears heartbeat timer after turn completes", async () => { + let resolveFn: (value: unknown) => void; + mockGenerateText.mockReturnValueOnce( + new Promise((resolve) => { + resolveFn = resolve; + }) as never, + ); + + const events: CodexClientEvent[] = []; + const runner = new ClaudeCodeRunner({ + cwd: "/tmp/workspace", + model: "sonnet", + onEvent: (event) => events.push(event), + heartbeatIntervalMs: 5000, + }); + + const turnPromise = runner.startSession({ prompt: "task", title: "test" }); + + resolveFn!({ + text: "done", + usage: { inputTokens: 10, outputTokens: 5, totalTokens: 15 }, + }); + await turnPromise; + + // After turn completes, simulate file changes — should NOT emit heartbeats + mtimeByPath["/tmp/workspace/.git/index"] = 9999; + mtimeByPath["/tmp/workspace"] = 9999; + vi.advanceTimersByTime(10000); + expect(events.filter((e) => e.event === "activity_heartbeat")).toHaveLength( + 0, + ); + }); + + it("does not start heartbeat when heartbeatIntervalMs is 0", async () => { + let resolveFn: (value: unknown) => void; + mockGenerateText.mockReturnValueOnce( + new Promise((resolve) => { + resolveFn = resolve; + }) as never, + ); + + const events: CodexClientEvent[] = []; + const runner = new ClaudeCodeRunner({ + cwd: "/tmp/workspace", + model: "sonnet", 
+ onEvent: (event) => events.push(event), + heartbeatIntervalMs: 0, + }); + + const turnPromise = runner.startSession({ prompt: "task", title: "test" }); + + mtimeByPath["/tmp/workspace/.git/index"] = 9999; + mtimeByPath["/tmp/workspace"] = 9999; + vi.advanceTimersByTime(20000); + expect(events.filter((e) => e.event === "activity_heartbeat")).toHaveLength( + 0, + ); + + resolveFn!({ + text: "done", + usage: { inputTokens: 10, outputTokens: 5, totalTokens: 15 }, + }); + await turnPromise; + }); + + it("emits multiple heartbeats for successive file changes", async () => { + let resolveFn: (value: unknown) => void; + mockGenerateText.mockReturnValueOnce( + new Promise((resolve) => { + resolveFn = resolve; + }) as never, + ); + + const events: CodexClientEvent[] = []; + const runner = new ClaudeCodeRunner({ + cwd: "/tmp/workspace", + model: "sonnet", + onEvent: (event) => events.push(event), + heartbeatIntervalMs: 5000, + }); + + const turnPromise = runner.startSession({ prompt: "task", title: "test" }); + + // First change — git index only + mtimeByPath["/tmp/workspace/.git/index"] = 2000; + vi.advanceTimersByTime(5000); + + // Second change — workspace dir only + mtimeByPath["/tmp/workspace"] = 3000; + vi.advanceTimersByTime(5000); + + // No change on third tick + vi.advanceTimersByTime(5000); + + expect(events.filter((e) => e.event === "activity_heartbeat")).toHaveLength( + 2, + ); + + resolveFn!({ + text: "done", + usage: { inputTokens: 10, outputTokens: 5, totalTokens: 15 }, + }); + await turnPromise; + }); +}); + +describe("resolveClaudeModelId", () => { + it("maps claude-opus-4 to opus", () => { + expect(resolveClaudeModelId("claude-opus-4")).toBe("opus"); + }); + + it("maps claude-opus-4-6 to opus", () => { + expect(resolveClaudeModelId("claude-opus-4-6")).toBe("opus"); + }); + + it("maps claude-sonnet-4-5 to sonnet", () => { + expect(resolveClaudeModelId("claude-sonnet-4-5")).toBe("sonnet"); + }); + + it("maps claude-haiku-4-5 to haiku", () => { + 
expect(resolveClaudeModelId("claude-haiku-4-5")).toBe("haiku"); + }); + + it("passes through already-short names unchanged", () => { + expect(resolveClaudeModelId("opus")).toBe("opus"); + expect(resolveClaudeModelId("sonnet")).toBe("sonnet"); + expect(resolveClaudeModelId("haiku")).toBe("haiku"); + }); + + it("passes through unknown model names unchanged", () => { + expect(resolveClaudeModelId("custom-model")).toBe("custom-model"); + }); +}); diff --git a/tests/runners/config.test.ts b/tests/runners/config.test.ts new file mode 100644 index 00000000..6444534c --- /dev/null +++ b/tests/runners/config.test.ts @@ -0,0 +1,118 @@ +import { describe, expect, it } from "vitest"; + +import { resolveWorkflowConfig } from "../../src/config/config-resolver.js"; +import { DEFAULT_RUNNER_KIND } from "../../src/config/defaults.js"; + +describe("runner config resolution", () => { + it("defaults runner.kind to 'codex' when not specified", () => { + const config = resolveWorkflowConfig({ + workflowPath: "/tmp/WORKFLOW.md", + config: {}, + promptTemplate: "test", + }); + + expect(config.runner.kind).toBe("codex"); + expect(config.runner.kind).toBe(DEFAULT_RUNNER_KIND); + expect(config.runner.model).toBeNull(); + }); + + it("reads runner.kind from YAML config", () => { + const config = resolveWorkflowConfig({ + workflowPath: "/tmp/WORKFLOW.md", + config: { + runner: { + kind: "claude-code", + model: "opus", + }, + }, + promptTemplate: "test", + }); + + expect(config.runner.kind).toBe("claude-code"); + expect(config.runner.model).toBe("opus"); + }); + + it("reads runner.kind gemini from YAML config", () => { + const config = resolveWorkflowConfig({ + workflowPath: "/tmp/WORKFLOW.md", + config: { + runner: { + kind: "gemini", + model: "gemini-2.5-pro", + }, + }, + promptTemplate: "test", + }); + + expect(config.runner.kind).toBe("gemini"); + expect(config.runner.model).toBe("gemini-2.5-pro"); + }); + + it("handles runner with kind only (no model)", () => { + const config = 
resolveWorkflowConfig({ + workflowPath: "/tmp/WORKFLOW.md", + config: { + runner: { + kind: "claude-code", + }, + }, + promptTemplate: "test", + }); + + expect(config.runner.kind).toBe("claude-code"); + expect(config.runner.model).toBeNull(); + }); + + it("preserves codex config alongside runner config", () => { + const config = resolveWorkflowConfig({ + workflowPath: "/tmp/WORKFLOW.md", + config: { + runner: { + kind: "claude-code", + model: "sonnet", + }, + codex: { + command: "codex app-server", + }, + }, + promptTemplate: "test", + }); + + expect(config.runner.kind).toBe("claude-code"); + expect(config.codex.command).toBe("codex app-server"); + }); + + it("stage-level runner overrides top-level runner", () => { + const config = resolveWorkflowConfig({ + workflowPath: "/tmp/WORKFLOW.md", + config: { + runner: { + kind: "codex", + }, + stages: { + investigate: { + type: "agent", + runner: "claude-code", + model: "opus", + on_complete: "implement", + }, + implement: { + type: "agent", + runner: "codex", + on_complete: "done", + }, + done: { + type: "terminal", + }, + }, + }, + promptTemplate: "test", + }); + + expect(config.runner.kind).toBe("codex"); + expect(config.stages).not.toBeNull(); + expect(config.stages!.stages.investigate!.runner).toBe("claude-code"); + expect(config.stages!.stages.investigate!.model).toBe("opus"); + expect(config.stages!.stages.implement!.runner).toBe("codex"); + }); +}); diff --git a/tests/runners/factory.test.ts b/tests/runners/factory.test.ts new file mode 100644 index 00000000..0f33d277 --- /dev/null +++ b/tests/runners/factory.test.ts @@ -0,0 +1,88 @@ +import { describe, expect, it, vi } from "vitest"; + +import type { CodexClientEvent } from "../../src/codex/app-server-client.js"; +import { ClaudeCodeRunner } from "../../src/runners/claude-code-runner.js"; +import { + createRunnerFromConfig, + isAiSdkRunner, +} from "../../src/runners/factory.js"; +import { GeminiRunner } from "../../src/runners/gemini-runner.js"; +import type { 
RunnerKind } from "../../src/runners/types.js"; +import { RUNNER_KINDS } from "../../src/runners/types.js"; + +vi.mock("ai", () => ({ + generateText: vi.fn(), +})); + +vi.mock("ai-sdk-provider-claude-code", () => ({ + claudeCode: vi.fn(() => "mock-claude-model"), +})); + +vi.mock("ai-sdk-provider-gemini-cli", () => ({ + createGeminiProvider: vi.fn(() => vi.fn()), +})); + +describe("createRunnerFromConfig", () => { + it("creates ClaudeCodeRunner for kind 'claude-code'", () => { + const onEvent = vi.fn(); + const runner = createRunnerFromConfig({ + config: { kind: "claude-code", model: "opus" }, + cwd: "/tmp/workspace", + onEvent, + }); + + expect(runner).toBeInstanceOf(ClaudeCodeRunner); + }); + + it("creates GeminiRunner for kind 'gemini'", () => { + const onEvent = vi.fn(); + const runner = createRunnerFromConfig({ + config: { kind: "gemini", model: "gemini-2.5-pro" }, + cwd: "/tmp/workspace", + onEvent, + }); + + expect(runner).toBeInstanceOf(GeminiRunner); + }); + + it("throws for kind 'codex'", () => { + expect(() => + createRunnerFromConfig({ + config: { kind: "codex", model: null }, + cwd: "/tmp/workspace", + onEvent: vi.fn(), + }), + ).toThrow("Codex runner uses the native CodexAppServerClient"); + }); + + it("uses default model when model is null", () => { + const runner = createRunnerFromConfig({ + config: { kind: "claude-code", model: null }, + cwd: "/tmp/workspace", + onEvent: vi.fn(), + }); + + // Default model for claude-code is "sonnet" + expect(runner).toBeInstanceOf(ClaudeCodeRunner); + }); +}); + +describe("isAiSdkRunner", () => { + it("returns true for claude-code", () => { + expect(isAiSdkRunner("claude-code")).toBe(true); + }); + + it("returns true for gemini", () => { + expect(isAiSdkRunner("gemini")).toBe(true); + }); + + it("returns false for codex", () => { + expect(isAiSdkRunner("codex")).toBe(false); + }); +}); + +describe("RUNNER_KINDS", () => { + it("contains all supported runner kinds", () => { + expect(RUNNER_KINDS).toEqual(["codex", 
"claude-code", "gemini"]); + }); +}); diff --git a/tests/runners/gemini-runner.test.ts b/tests/runners/gemini-runner.test.ts new file mode 100644 index 00000000..8df48159 --- /dev/null +++ b/tests/runners/gemini-runner.test.ts @@ -0,0 +1,175 @@ +import { describe, expect, it, vi } from "vitest"; + +import type { CodexClientEvent } from "../../src/codex/app-server-client.js"; +import { GeminiRunner } from "../../src/runners/gemini-runner.js"; + +const mockModel = vi.fn(); + +vi.mock("ai", () => ({ + generateText: vi.fn(), +})); + +vi.mock("ai-sdk-provider-gemini-cli", () => ({ + createGeminiProvider: vi.fn(() => mockModel), +})); + +import { generateText } from "ai"; + +const mockGenerateText = vi.mocked(generateText); + +describe("GeminiRunner", () => { + it("implements AgentRunnerCodexClient interface", () => { + const runner = new GeminiRunner({ + cwd: "/tmp/workspace", + model: "gemini-2.5-pro", + }); + + expect(typeof runner.startSession).toBe("function"); + expect(typeof runner.continueTurn).toBe("function"); + expect(typeof runner.close).toBe("function"); + }); + + it("calls generateText with gemini model on startSession", async () => { + mockModel.mockReturnValue("mock-gemini-model"); + mockGenerateText.mockResolvedValueOnce({ + text: "Hello from Gemini", + usage: { + inputTokens: 200, + outputTokens: 100, + totalTokens: 300, + inputTokenDetails: { + noCacheTokens: undefined, + cacheReadTokens: undefined, + cacheWriteTokens: undefined, + }, + outputTokenDetails: { + textTokens: undefined, + reasoningTokens: undefined, + }, + }, + } as never); + + const runner = new GeminiRunner({ + cwd: "/tmp/workspace", + model: "gemini-2.5-pro", + }); + + const result = await runner.startSession({ + prompt: "Review the code", + title: "ABC-123: Review", + }); + + expect(mockModel).toHaveBeenCalledWith("gemini-2.5-pro"); + expect(mockGenerateText).toHaveBeenCalledWith({ + model: "mock-gemini-model", + prompt: "Review the code", + }); + 
expect(result.status).toBe("completed"); + expect(result.message).toBe("Hello from Gemini"); + expect(result.usage).toEqual({ + inputTokens: 200, + outputTokens: 100, + totalTokens: 300, + }); + }); + + it("emits session_started and turn_completed events", async () => { + mockModel.mockReturnValue("mock-gemini-model"); + mockGenerateText.mockResolvedValueOnce({ + text: "Done", + usage: { + inputTokens: 10, + outputTokens: 5, + totalTokens: 15, + inputTokenDetails: { + noCacheTokens: undefined, + cacheReadTokens: undefined, + cacheWriteTokens: undefined, + }, + outputTokenDetails: { + textTokens: undefined, + reasoningTokens: undefined, + }, + }, + } as never); + + const events: CodexClientEvent[] = []; + const runner = new GeminiRunner({ + cwd: "/tmp/workspace", + model: "gemini-2.5-pro", + onEvent: (event) => events.push(event), + }); + + await runner.startSession({ prompt: "test", title: "test" }); + + expect(events).toHaveLength(2); + expect(events[0]!.event).toBe("session_started"); + expect(events[0]!.codexAppServerPid).toBeNull(); + expect(events[1]!.event).toBe("turn_completed"); + expect(events[1]!.usage).toEqual({ + inputTokens: 10, + outputTokens: 5, + totalTokens: 15, + }); + }); + + it("emits turn_failed on error", async () => { + mockModel.mockReturnValue("mock-gemini-model"); + mockGenerateText.mockRejectedValueOnce(new Error("Gemini unavailable")); + + const events: CodexClientEvent[] = []; + const runner = new GeminiRunner({ + cwd: "/tmp/workspace", + model: "gemini-2.5-pro", + onEvent: (event) => events.push(event), + }); + + const result = await runner.startSession({ + prompt: "test", + title: "test", + }); + + expect(result.status).toBe("failed"); + expect(result.message).toBe("Gemini unavailable"); + expect(events.map((e) => e.event)).toEqual([ + "session_started", + "turn_failed", + ]); + }); + + it("increments turn count across calls", async () => { + const mockResult = { + text: "ok", + usage: { + inputTokens: 10, + outputTokens: 5, + 
totalTokens: 15, + inputTokenDetails: { + noCacheTokens: undefined, + cacheReadTokens: undefined, + cacheWriteTokens: undefined, + }, + outputTokenDetails: { + textTokens: undefined, + reasoningTokens: undefined, + }, + }, + } as never; + mockModel.mockReturnValue("mock-gemini-model"); + mockGenerateText + .mockResolvedValueOnce(mockResult) + .mockResolvedValueOnce(mockResult); + + const runner = new GeminiRunner({ + cwd: "/tmp/workspace", + model: "gemini-2.5-pro", + }); + + const first = await runner.startSession({ prompt: "p1", title: "t" }); + const second = await runner.continueTurn("p2", "t"); + + expect(first.turnId).toBe("turn-1"); + expect(second.turnId).toBe("turn-2"); + expect(first.threadId).toBe(second.threadId); + }); +}); diff --git a/tests/runners/integration-smoke.test.ts b/tests/runners/integration-smoke.test.ts new file mode 100644 index 00000000..38f804de --- /dev/null +++ b/tests/runners/integration-smoke.test.ts @@ -0,0 +1,95 @@ +/** + * Integration smoke tests for AI SDK provider runners. + * + * These tests call the real providers (claude-code, gemini-cli) with trivial + * prompts and verify that output is returned. They require authenticated CLIs: + * - `claude` CLI (Claude Code Max subscription) + * - `gemini` CLI (Google paid subscription) + * + * Skipped by default — CI doesn't have auth'd CLIs. 
+ * + * Run manually: + * npx vitest run tests/runners/integration-smoke.test.ts + * + * Or run a single provider: + * npx vitest run tests/runners/integration-smoke.test.ts -t "claude" + * npx vitest run tests/runners/integration-smoke.test.ts -t "gemini" + */ +import { describe, expect, it } from "vitest"; + +import { ClaudeCodeRunner } from "../../src/runners/claude-code-runner.js"; +import { GeminiRunner } from "../../src/runners/gemini-runner.js"; + +const SKIP = process.env.RUN_INTEGRATION !== "1"; + +describe.skipIf(SKIP)("integration: AI SDK provider smoke tests", () => { + it("claude-code runner returns text from a trivial prompt", async () => { + const runner = new ClaudeCodeRunner({ + cwd: process.cwd(), + model: "sonnet", + }); + + try { + const result = await runner.startSession({ + prompt: 'Respond with exactly: "hello from claude"', + title: "smoke-test", + }); + + expect(result.status).toBe("completed"); + expect(result.message).toBeTruthy(); + expect(typeof result.message).toBe("string"); + expect(result.usage).not.toBeNull(); + console.log( + ` Claude response (${result.usage?.totalTokens ?? 
"?"} tokens): ${result.message?.slice(0, 100)}`, + ); + } finally { + await runner.close(); + } + }, 60_000); + + it("claude-code runner maps full model IDs to short names", async () => { + const runner = new ClaudeCodeRunner({ + cwd: process.cwd(), + model: "claude-sonnet-4-5", // Should be mapped to "sonnet" + }); + + try { + const result = await runner.startSession({ + prompt: 'Respond with exactly: "model id test"', + title: "smoke-test-model-id", + }); + + expect(result.status).toBe("completed"); + expect(result.message).toBeTruthy(); + console.log( + ` Claude (mapped model) response: ${result.message?.slice(0, 100)}`, + ); + } finally { + await runner.close(); + } + }, 60_000); + + it("gemini runner returns text from a trivial prompt", async () => { + const runner = new GeminiRunner({ + cwd: process.cwd(), + model: "gemini-2.5-pro", + }); + + try { + const result = await runner.startSession({ + prompt: 'Respond with exactly: "hello from gemini"', + title: "smoke-test", + }); + + expect(result.status).toBe("completed"); + expect(result.message).toBeTruthy(); + expect(typeof result.message).toBe("string"); + expect(result.usage).not.toBeNull(); + console.log( + ` Gemini response (${result.usage?.totalTokens ?? 
"?"} tokens): ${result.message?.slice(0, 100)}`, + ); + } finally { + await runner.close(); + } + }, 60_000); +}); diff --git a/tests/session-store.test.ts b/tests/session-store.test.ts new file mode 100644 index 00000000..39fc3f0b --- /dev/null +++ b/tests/session-store.test.ts @@ -0,0 +1,284 @@ +import { afterEach, beforeEach, describe, expect, it, vi } from "vitest"; + +// Mock the AI SDK modules before importing handler +vi.mock("ai", () => ({ + streamText: vi.fn(), +})); + +vi.mock("ai-sdk-provider-claude-code", () => ({ + claudeCode: vi.fn(), +})); + +vi.mock("../src/slack-bot/stream-consumer.js", () => ({ + StreamConsumer: vi.fn().mockImplementation(() => ({ + append: vi.fn().mockResolvedValue(undefined), + finish: vi.fn().mockResolvedValue(undefined), + })), +})); + +import { streamText } from "ai"; +import { claudeCode } from "ai-sdk-provider-claude-code"; + +import type { BoltMessageArgs } from "../src/slack-bot/handler.js"; +import { createMessageHandler } from "../src/slack-bot/handler.js"; +import { + createCcSessionStore, + getCcSessionId, + setCcSessionId, +} from "../src/slack-bot/session-store.js"; +import { StreamConsumer } from "../src/slack-bot/stream-consumer.js"; +import type { ChannelProjectMap, SessionMap } from "../src/slack-bot/types.js"; + +/** Create a mock Bolt message args object. 
*/ +function createMockBoltArgs( + channelId: string, + text: string, + overrides?: Partial<{ + ts: string; + thread_ts: string; + }>, +): { + args: BoltMessageArgs; + say: ReturnType<typeof vi.fn>; + client: { + reactions: { + add: ReturnType<typeof vi.fn>; + remove: ReturnType<typeof vi.fn>; + }; + }; +} { + const say = vi.fn().mockResolvedValue(undefined); + const client = { + reactions: { + add: vi.fn().mockResolvedValue(undefined), + remove: vi.fn().mockResolvedValue(undefined), + }, + assistant: { + threads: { + setStatus: vi.fn().mockResolvedValue(undefined), + }, + }, + }; + + const message: Record<string, unknown> = { + type: "message" as const, + text, + ts: overrides?.ts ?? "1234.5678", + channel: channelId, + user: "U_TEST_USER", + }; + if (overrides?.thread_ts) { + message.thread_ts = overrides.thread_ts; + } + + const args = { + message, + say, + client, + context: { teamId: "T_TEST_TEAM" }, + logger: { debug: vi.fn(), info: vi.fn(), warn: vi.fn(), error: vi.fn() }, + next: vi.fn(), + event: message, + payload: message, + body: { event: message }, + } as unknown as BoltMessageArgs; + + return { args, say, client }; +} + +// Helper to create an async iterable from strings +async function* createAsyncIterable(chunks: string[]): AsyncIterable<string> { + for (const chunk of chunks) { + yield chunk; + } +} + +// Helper to create a mock streamText return value with response promise +function createMockStreamResult(chunks: string[], sessionId?: string) { + const messages = sessionId + ? 
[{ providerMetadata: { "claude-code": { sessionId } } }] + : []; + return { + textStream: createAsyncIterable(chunks), + response: Promise.resolve({ messages }), + } as unknown as ReturnType<typeof streamText>; +} + +describe("CcSessionStore", () => { + it("returns undefined for unknown thread ID", () => { + const store = createCcSessionStore(); + expect(getCcSessionId(store, "slack:C123:1234.5678")).toBeUndefined(); + }); + + it("stores and retrieves a session ID for a thread", () => { + const store = createCcSessionStore(); + setCcSessionId(store, "slack:C123:1234.5678", "session-abc-123"); + expect(getCcSessionId(store, "slack:C123:1234.5678")).toBe( + "session-abc-123", + ); + }); + + it("overwrites existing session ID for the same thread", () => { + const store = createCcSessionStore(); + setCcSessionId(store, "slack:C123:1234.5678", "session-old"); + setCcSessionId(store, "slack:C123:1234.5678", "session-new"); + expect(getCcSessionId(store, "slack:C123:1234.5678")).toBe("session-new"); + }); + + it("stores different session IDs for different threads", () => { + const store = createCcSessionStore(); + setCcSessionId(store, "slack:C123:1111.0000", "session-a"); + setCcSessionId(store, "slack:C123:2222.0000", "session-b"); + expect(getCcSessionId(store, "slack:C123:1111.0000")).toBe("session-a"); + expect(getCcSessionId(store, "slack:C123:2222.0000")).toBe("session-b"); + }); +}); + +describe("Session continuity in handler", () => { + beforeEach(() => { + vi.mocked(StreamConsumer).mockImplementation( + () => + ({ + append: vi.fn().mockResolvedValue(undefined), + finish: vi.fn().mockResolvedValue(undefined), + }) as unknown as StreamConsumer, + ); + }); + + afterEach(() => { + vi.restoreAllMocks(); + }); + + it("passes resume to claudeCode for thread replies with existing session", async () => { + const channelMap: ChannelProjectMap = new Map([ + ["C123", "/tmp/test-project"], + ]); + const sessions: SessionMap = new Map(); + const ccSessions = 
createCcSessionStore(); + const mockModel = { id: "mock-claude-code-model" }; + // Thread ID = message.thread_ts || message.ts + const threadTs = "1234.5678"; + + // Pre-populate a CC session ID for this thread (simulates prior interaction) + setCcSessionId(ccSessions, threadTs, "existing-session-id"); + + vi.mocked(claudeCode).mockReturnValue( + mockModel as unknown as ReturnType<typeof claudeCode>, + ); + vi.mocked(streamText).mockReturnValue( + createMockStreamResult(["Follow-up response"], "updated-session-id"), + ); + + const handler = createMessageHandler({ + channelMap, + sessions, + ccSessions, + }); + + const { args } = createMockBoltArgs("C123", "follow-up question", { + ts: "1234.9999", + thread_ts: threadTs, + }); + await handler(args); + + // Verify claudeCode was called with resume option + expect(claudeCode).toHaveBeenCalledWith( + expect.any(String), + expect.objectContaining({ + cwd: "/tmp/test-project", + permissionMode: "bypassPermissions", + resume: "existing-session-id", + }), + ); + }); + + it("does not pass resume for new top-level messages (no existing session)", async () => { + const channelMap: ChannelProjectMap = new Map([ + ["C123", "/tmp/test-project"], + ]); + const sessions: SessionMap = new Map(); + const ccSessions = createCcSessionStore(); + const mockModel = { id: "mock-claude-code-model" }; + + // ccSessions is empty — no prior session exists + + vi.mocked(claudeCode).mockReturnValue( + mockModel as unknown as ReturnType<typeof claudeCode>, + ); + vi.mocked(streamText).mockReturnValue( + createMockStreamResult(["Fresh response"], "new-session-id"), + ); + + const handler = createMessageHandler({ + channelMap, + sessions, + ccSessions, + }); + + const { args } = createMockBoltArgs("C123", "brand new message", { + ts: "5678.9012", + }); + await handler(args); + + // Verify claudeCode was called WITHOUT resume + expect(claudeCode).toHaveBeenCalledWith(expect.any(String), { + cwd: "/tmp/test-project", + permissionMode: 
"bypassPermissions", + }); + }); + + it("stores session ID from provider metadata after response", async () => { + const channelMap: ChannelProjectMap = new Map([ + ["C123", "/tmp/test-project"], + ]); + const sessions: SessionMap = new Map(); + const ccSessions = createCcSessionStore(); + const mockModel = { id: "mock-claude-code-model" }; + + vi.mocked(claudeCode).mockReturnValue( + mockModel as unknown as ReturnType<typeof claudeCode>, + ); + vi.mocked(streamText).mockReturnValue( + createMockStreamResult(["Hello"], "returned-session-id"), + ); + + const handler = createMessageHandler({ + channelMap, + sessions, + ccSessions, + }); + + const { args } = createMockBoltArgs("C123", "test", { ts: "1234.5678" }); + await handler(args); + + // Thread ID = message.thread_ts || message.ts = "1234.5678" + expect(getCcSessionId(ccSessions, "1234.5678")).toBe("returned-session-id"); + }); + + it("does not store session ID when provider metadata lacks it", async () => { + const channelMap: ChannelProjectMap = new Map([ + ["C123", "/tmp/test-project"], + ]); + const sessions: SessionMap = new Map(); + const ccSessions = createCcSessionStore(); + const mockModel = { id: "mock-claude-code-model" }; + + vi.mocked(claudeCode).mockReturnValue( + mockModel as unknown as ReturnType<typeof claudeCode>, + ); + // No sessionId in the response + vi.mocked(streamText).mockReturnValue(createMockStreamResult(["Hello"])); + + const handler = createMessageHandler({ + channelMap, + sessions, + ccSessions, + }); + + const { args } = createMockBoltArgs("C123", "test", { ts: "1234.5678" }); + await handler(args); + + // Verify no session ID was stored + expect(getCcSessionId(ccSessions, "1234.5678")).toBeUndefined(); + }); +}); diff --git a/tests/slack-bot/format.test.ts b/tests/slack-bot/format.test.ts new file mode 100644 index 00000000..4b8ec6ed --- /dev/null +++ b/tests/slack-bot/format.test.ts @@ -0,0 +1,80 @@ +import { describe, expect, it } from "vitest"; + +import { markdownToMrkdwn } 
from "../../src/slack-bot/format.js"; + +describe("markdownToMrkdwn", () => { + it("converts markdown links to Slack mrkdwn links", () => { + expect(markdownToMrkdwn("[Click here](https://example.com)")).toBe( + "<https://example.com|Click here>", + ); + }); + + it("converts headers to bold text", () => { + expect(markdownToMrkdwn("## My Header")).toBe("*My Header*"); + expect(markdownToMrkdwn("# Title")).toBe("*Title*"); + expect(markdownToMrkdwn("### Subsection")).toBe("*Subsection*"); + }); + + it("converts bold markdown to Slack bold", () => { + expect(markdownToMrkdwn("This is **bold** text")).toBe( + "This is *bold* text", + ); + }); + + it("converts italic markdown to Slack italic", () => { + expect(markdownToMrkdwn("This is *italic* text")).toBe( + "This is _italic_ text", + ); + }); + + it("converts strikethrough markdown to Slack strikethrough", () => { + expect(markdownToMrkdwn("This is ~~struck~~ text")).toBe( + "This is ~struck~ text", + ); + }); + + it("preserves fenced code blocks", () => { + const input = "Before\n```\nconst x = **bold**;\n```\nAfter"; + const result = markdownToMrkdwn(input); + expect(result).toContain("```\nconst x = **bold**;\n```"); + expect(result).toContain("Before"); + expect(result).toContain("After"); + }); + + it("preserves inline code", () => { + const input = "Use `**not bold**` for code"; + const result = markdownToMrkdwn(input); + // The backtick content should be preserved exactly as-is + expect(result).toBe("Use `**not bold**` for code"); + }); + + it("handles multiple protected regions", () => { + const input = + "Run `npm install` then check ```\npackage.json\n``` and use `yarn` too"; + const result = markdownToMrkdwn(input); + expect(result).toContain("`npm install`"); + expect(result).toContain("```\npackage.json\n```"); + expect(result).toContain("`yarn`"); + }); + + it("handles mixed conversions", () => { + const input = + "## Setup\n\nInstall **dependencies** with `npm install`, then visit 
[docs](https://docs.example.com).\n\nThis is *important* and ~~deprecated~~."; + const result = markdownToMrkdwn(input); + expect(result).toContain("*Setup*"); + expect(result).toContain("*dependencies*"); + expect(result).toContain("`npm install`"); + expect(result).toContain("<https://docs.example.com|docs>"); + expect(result).toContain("_important_"); + expect(result).toContain("~deprecated~"); + }); + + it("returns plain text unchanged", () => { + const input = "Just a plain message with no formatting."; + expect(markdownToMrkdwn(input)).toBe(input); + }); + + it("handles empty string", () => { + expect(markdownToMrkdwn("")).toBe(""); + }); +}); diff --git a/tests/slack-bot/handler.test.ts b/tests/slack-bot/handler.test.ts new file mode 100644 index 00000000..ab7912da --- /dev/null +++ b/tests/slack-bot/handler.test.ts @@ -0,0 +1,668 @@ +import { afterEach, beforeEach, describe, expect, it, vi } from "vitest"; + +// Mock the AI SDK modules before importing handler +vi.mock("ai", () => ({ + streamText: vi.fn(), +})); + +vi.mock("ai-sdk-provider-claude-code", () => ({ + claudeCode: vi.fn(), +})); + +vi.mock("../../src/slack-bot/stream-consumer.js", () => ({ + StreamConsumer: vi.fn().mockImplementation(() => ({ + append: vi.fn().mockResolvedValue(undefined), + finish: vi.fn().mockResolvedValue(undefined), + })), +})); + +import { streamText } from "ai"; +import { claudeCode } from "ai-sdk-provider-claude-code"; + +import { + type BoltMessageArgs, + createMessageHandler, + splitAtParagraphs, +} from "../../src/slack-bot/handler.js"; +import { createCcSessionStore } from "../../src/slack-bot/session-store.js"; +import { StreamConsumer } from "../../src/slack-bot/stream-consumer.js"; +import type { + ChannelProjectMap, + SessionMap, +} from "../../src/slack-bot/types.js"; + +/** Create a mock Bolt message args object. 
*/ +function createMockBoltArgs( + channelId: string, + text: string, + overrides?: Partial<{ + ts: string; + thread_ts: string; + bot_id: string; + subtype: string; + user: string; + teamId: string; + }>, +): { + args: BoltMessageArgs; + say: ReturnType<typeof vi.fn>; + client: { + reactions: { + add: ReturnType<typeof vi.fn>; + remove: ReturnType<typeof vi.fn>; + }; + assistant: { + threads: { + setStatus: ReturnType<typeof vi.fn>; + }; + }; + }; +} { + const say = vi.fn().mockResolvedValue(undefined); + const client = { + reactions: { + add: vi.fn().mockResolvedValue(undefined), + remove: vi.fn().mockResolvedValue(undefined), + }, + assistant: { + threads: { + setStatus: vi.fn().mockResolvedValue(undefined), + }, + }, + }; + + const message: Record<string, unknown> = { + type: "message" as const, + text, + ts: overrides?.ts ?? "1234.5678", + channel: channelId, + user: overrides?.user ?? "U_TEST_USER", + }; + if (overrides?.thread_ts) { + message.thread_ts = overrides.thread_ts; + } + if (overrides?.bot_id) { + message.bot_id = overrides.bot_id; + } + if (overrides?.subtype) { + message.subtype = overrides.subtype; + } + + const args = { + message, + say, + client, + context: { teamId: overrides?.teamId ?? "T_TEST_TEAM" }, + logger: { + debug: vi.fn(), + info: vi.fn(), + warn: vi.fn(), + error: vi.fn(), + }, + next: vi.fn(), + event: message, + payload: message, + body: { event: message }, + } as unknown as BoltMessageArgs; + + return { args, say, client }; +} + +// Helper to create an async iterable from strings +async function* createAsyncIterable(chunks: string[]): AsyncIterable<string> { + for (const chunk of chunks) { + yield chunk; + } +} + +// Helper to create a mock streamText return value with response promise +function createMockStreamResult(chunks: string[], sessionId?: string) { + const messages = sessionId + ? 
[{ providerMetadata: { "claude-code": { sessionId } } }] + : []; + return { + textStream: createAsyncIterable(chunks), + response: Promise.resolve({ messages }), + } as unknown as ReturnType<typeof streamText>; +} + +describe("createMessageHandler", () => { + beforeEach(() => { + // Re-establish StreamConsumer mock implementation (restoreAllMocks clears it) + vi.mocked(StreamConsumer).mockImplementation( + () => + ({ + append: vi.fn().mockResolvedValue(undefined), + finish: vi.fn().mockResolvedValue(undefined), + }) as unknown as StreamConsumer, + ); + }); + + afterEach(() => { + vi.restoreAllMocks(); + }); + + it("calls streamText with claudeCode provider and correct cwd", async () => { + const channelMap: ChannelProjectMap = new Map([ + ["C123", "/tmp/test-project"], + ]); + const sessions: SessionMap = new Map(); + const ccSessions = createCcSessionStore(); + const mockModel = { id: "mock-claude-code-model" }; + + vi.mocked(claudeCode).mockReturnValue( + mockModel as unknown as ReturnType<typeof claudeCode>, + ); + vi.mocked(streamText).mockReturnValue( + createMockStreamResult(["Hello from Claude"]), + ); + + const handler = createMessageHandler({ + channelMap, + sessions, + ccSessions, + model: "sonnet", + }); + + const { args } = createMockBoltArgs( + "C123", + "What files are in this project?", + ); + await handler(args); + + // Verify claudeCode was called with correct cwd and permissionMode + expect(claudeCode).toHaveBeenCalledWith("sonnet", { + cwd: "/tmp/test-project", + permissionMode: "bypassPermissions", + }); + + // Verify streamText was called with the claudeCode model and prompt + expect(streamText).toHaveBeenCalledWith({ + model: mockModel, + prompt: "What files are in this project?", + }); + }); + + it("uses StreamConsumer for progressive streaming", async () => { + const channelMap: ChannelProjectMap = new Map([ + ["C123", "/tmp/test-project"], + ]); + const sessions: SessionMap = new Map(); + const ccSessions = createCcSessionStore(); + const 
mockModel = { id: "mock-claude-code-model" }; + + vi.mocked(claudeCode).mockReturnValue( + mockModel as unknown as ReturnType<typeof claudeCode>, + ); + vi.mocked(streamText).mockReturnValue( + createMockStreamResult(["Hello", " world"]), + ); + + const handler = createMessageHandler({ + channelMap, + sessions, + ccSessions, + }); + + const { args } = createMockBoltArgs("C123", "What files?"); + await handler(args); + + // Verify StreamConsumer was constructed with correct params + expect(StreamConsumer).toHaveBeenCalledWith( + expect.anything(), // client + "C123", // channel + "1234.5678", // threadTs + "U_TEST_USER", // userId + "T_TEST_TEAM", // teamId + ); + + // Get the mock instance from the constructor's return value + const consumerInstance = vi.mocked(StreamConsumer).mock.results[0]! + .value as { + append: ReturnType<typeof vi.fn>; + finish: ReturnType<typeof vi.fn>; + }; + + // Verify append was called for each chunk + expect(consumerInstance.append).toHaveBeenCalledWith("Hello"); + expect(consumerInstance.append).toHaveBeenCalledWith(" world"); + + // Verify finish was called + expect(consumerInstance.finish).toHaveBeenCalled(); + }); + + it("sets thinking status before streaming", async () => { + const channelMap: ChannelProjectMap = new Map([ + ["C123", "/tmp/test-project"], + ]); + const sessions: SessionMap = new Map(); + const ccSessions = createCcSessionStore(); + const mockModel = { id: "mock-claude-code-model" }; + + vi.mocked(claudeCode).mockReturnValue( + mockModel as unknown as ReturnType<typeof claudeCode>, + ); + vi.mocked(streamText).mockReturnValue(createMockStreamResult(["OK"])); + + const handler = createMessageHandler({ + channelMap, + sessions, + ccSessions, + }); + + const { args, client } = createMockBoltArgs("C123", "test"); + await handler(args); + + expect(client.assistant.threads.setStatus).toHaveBeenCalledWith({ + channel_id: "C123", + thread_ts: "1234.5678", + status: "is thinking...", + }); + }); + + it("silently handles 
setStatus failure", async () => { + const channelMap: ChannelProjectMap = new Map([ + ["C123", "/tmp/test-project"], + ]); + const sessions: SessionMap = new Map(); + const ccSessions = createCcSessionStore(); + const mockModel = { id: "mock-claude-code-model" }; + + vi.mocked(claudeCode).mockReturnValue( + mockModel as unknown as ReturnType<typeof claudeCode>, + ); + vi.mocked(streamText).mockReturnValue(createMockStreamResult(["OK"])); + + const handler = createMessageHandler({ + channelMap, + sessions, + ccSessions, + }); + + const { args, client } = createMockBoltArgs("C123", "test"); + client.assistant.threads.setStatus.mockRejectedValue( + new Error("missing_scope"), + ); + + // Should not throw + await handler(args); + }); + + it("uses bypassPermissions for all CC invocations", async () => { + const channelMap: ChannelProjectMap = new Map([ + ["C123", "/tmp/test-project"], + ]); + const sessions: SessionMap = new Map(); + const ccSessions = createCcSessionStore(); + const mockModel = { id: "mock-claude-code-model" }; + + vi.mocked(claudeCode).mockReturnValue( + mockModel as unknown as ReturnType<typeof claudeCode>, + ); + vi.mocked(streamText).mockReturnValue(createMockStreamResult(["OK"])); + + const handler = createMessageHandler({ + channelMap, + sessions, + ccSessions, + }); + const { args } = createMockBoltArgs("C123", "test"); + await handler(args); + + expect(claudeCode).toHaveBeenCalledWith( + expect.any(String), + expect.objectContaining({ permissionMode: "bypassPermissions" }), + ); + }); + + it("posts warning when channel has no mapped project directory", async () => { + const channelMap: ChannelProjectMap = new Map(); // empty + const sessions: SessionMap = new Map(); + const ccSessions = createCcSessionStore(); + + const handler = createMessageHandler({ + channelMap, + sessions, + ccSessions, + }); + const { args, say, client } = createMockBoltArgs("C999", "hello"); + await handler(args); + + expect(say).toHaveBeenCalledWith( + 
expect.objectContaining({ + text: expect.stringContaining("No project directory mapped"), + }), + ); + // Should still remove eyes and add warning + expect(client.reactions.remove).toHaveBeenCalledWith( + expect.objectContaining({ name: "eyes" }), + ); + expect(client.reactions.add).toHaveBeenCalledWith( + expect.objectContaining({ name: "warning" }), + ); + }); + + it("handles streamText errors by posting structured error message", async () => { + const channelMap: ChannelProjectMap = new Map([ + ["C123", "/tmp/test-project"], + ]); + const sessions: SessionMap = new Map(); + const ccSessions = createCcSessionStore(); + const mockModel = { id: "mock-claude-code-model" }; + + vi.mocked(claudeCode).mockReturnValue( + mockModel as unknown as ReturnType<typeof claudeCode>, + ); + + // Create a failing async iterable (plain object to avoid lint/useYield) + const failingStream: AsyncIterable<string> = { + [Symbol.asyncIterator]() { + return { + async next(): Promise<IteratorResult<string>> { + throw new Error("Claude Code failed"); + }, + }; + }, + }; + + vi.mocked(streamText).mockReturnValue({ + textStream: failingStream, + response: Promise.resolve({ messages: [] }), + } as unknown as ReturnType<typeof streamText>); + + const handler = createMessageHandler({ + channelMap, + sessions, + ccSessions, + }); + const { args, say, client } = createMockBoltArgs("C123", "test"); + await handler(args); + + // Should post structured error message + expect(say).toHaveBeenCalledWith( + expect.objectContaining({ + text: expect.stringContaining("Error:"), + }), + ); + expect(say).toHaveBeenCalledWith( + expect.objectContaining({ + text: expect.stringContaining("Claude Code failed"), + }), + ); + // Should replace eyes with x + expect(client.reactions.remove).toHaveBeenCalledWith( + expect.objectContaining({ name: "eyes" }), + ); + expect(client.reactions.add).toHaveBeenCalledWith( + expect.objectContaining({ name: "x" }), + ); + }); + + it("cleans up StreamConsumer on error", async 
() => { + const channelMap: ChannelProjectMap = new Map([ + ["C123", "/tmp/test-project"], + ]); + const sessions: SessionMap = new Map(); + const ccSessions = createCcSessionStore(); + const mockModel = { id: "mock-claude-code-model" }; + + vi.mocked(claudeCode).mockReturnValue( + mockModel as unknown as ReturnType<typeof claudeCode>, + ); + + const failingStream: AsyncIterable<string> = { + [Symbol.asyncIterator]() { + return { + async next(): Promise<IteratorResult<string>> { + throw new Error("stream error"); + }, + }; + }, + }; + + vi.mocked(streamText).mockReturnValue({ + textStream: failingStream, + response: Promise.resolve({ messages: [] }), + } as unknown as ReturnType<typeof streamText>); + + const handler = createMessageHandler({ + channelMap, + sessions, + ccSessions, + }); + const { args } = createMockBoltArgs("C123", "test"); + await handler(args); + + // Get the mock instance from the constructor's return value + const consumerInstance = vi.mocked(StreamConsumer).mock.results[0]! 
+ .value as { finish: ReturnType<typeof vi.fn> }; + + // finish should have been called for cleanup + expect(consumerInstance.finish).toHaveBeenCalled(); + }); + + it("tracks session state in the sessions map", async () => { + const channelMap: ChannelProjectMap = new Map([ + ["C123", "/tmp/test-project"], + ]); + const sessions: SessionMap = new Map(); + const ccSessions = createCcSessionStore(); + const mockModel = { id: "mock-claude-code-model" }; + + vi.mocked(claudeCode).mockReturnValue( + mockModel as unknown as ReturnType<typeof claudeCode>, + ); + vi.mocked(streamText).mockReturnValue(createMockStreamResult(["OK"])); + + const handler = createMessageHandler({ + channelMap, + sessions, + ccSessions, + }); + const { args } = createMockBoltArgs("C123", "test"); + await handler(args); + + // Thread ID = message.thread_ts || message.ts = "1234.5678" + const session = sessions.get("1234.5678"); + expect(session).toBeDefined(); + expect(session?.channelId).toBe("C123"); + expect(session?.projectDir).toBe("/tmp/test-project"); + }); + + it("skips messages with bot_id", async () => { + const channelMap: ChannelProjectMap = new Map([ + ["C123", "/tmp/test-project"], + ]); + const sessions: SessionMap = new Map(); + const ccSessions = createCcSessionStore(); + + const handler = createMessageHandler({ + channelMap, + sessions, + ccSessions, + }); + const { args, say } = createMockBoltArgs("C123", "bot message", { + bot_id: "B123", + }); + await handler(args); + + expect(say).not.toHaveBeenCalled(); + }); + + it("skips messages with subtype message_changed", async () => { + const channelMap: ChannelProjectMap = new Map([ + ["C123", "/tmp/test-project"], + ]); + const sessions: SessionMap = new Map(); + const ccSessions = createCcSessionStore(); + + const handler = createMessageHandler({ + channelMap, + sessions, + ccSessions, + }); + const { args, say } = createMockBoltArgs("C123", "edited", { + subtype: "message_changed", + }); + await handler(args); + + 
expect(say).not.toHaveBeenCalled(); + }); + + it("skips messages with subtype message_deleted", async () => { + const channelMap: ChannelProjectMap = new Map([ + ["C123", "/tmp/test-project"], + ]); + const sessions: SessionMap = new Map(); + const ccSessions = createCcSessionStore(); + + const handler = createMessageHandler({ + channelMap, + sessions, + ccSessions, + }); + const { args, say } = createMockBoltArgs("C123", "", { + subtype: "message_deleted", + }); + await handler(args); + + expect(say).not.toHaveBeenCalled(); + }); + + it("resumes CC session for thread replies", async () => { + const channelMap: ChannelProjectMap = new Map([ + ["C123", "/tmp/test-project"], + ]); + const sessions: SessionMap = new Map(); + const ccSessions = createCcSessionStore(); + const mockModel = { id: "mock-claude-code-model" }; + + vi.mocked(claudeCode).mockReturnValue( + mockModel as unknown as ReturnType<typeof claudeCode>, + ); + + // First message: returns a sessionId via providerMetadata + vi.mocked(streamText).mockReturnValue( + createMockStreamResult(["First response"], "cc-session-abc"), + ); + + const handler = createMessageHandler({ + channelMap, + sessions, + ccSessions, + model: "sonnet", + }); + + // First message in thread + const { args: firstArgs } = createMockBoltArgs("C123", "first message", { + ts: "1000.0001", + }); + await handler(firstArgs); + + // Verify first call does NOT include resume + expect(claudeCode).toHaveBeenCalledWith("sonnet", { + cwd: "/tmp/test-project", + permissionMode: "bypassPermissions", + }); + + // Second message: reply in same thread + vi.mocked(claudeCode).mockClear(); + vi.mocked(streamText).mockReturnValue( + createMockStreamResult(["Second response"], "cc-session-abc"), + ); + + const { args: secondArgs } = createMockBoltArgs("C123", "follow up", { + ts: "1000.0002", + thread_ts: "1000.0001", + }); + await handler(secondArgs); + + // Verify second call includes resume with session ID + 
expect(claudeCode).toHaveBeenCalledWith("sonnet", { + cwd: "/tmp/test-project", + permissionMode: "bypassPermissions", + resume: "cc-session-abc", + }); + }); + + it("starts fresh session for new thread (no resume)", async () => { + const channelMap: ChannelProjectMap = new Map([ + ["C123", "/tmp/test-project"], + ]); + const sessions: SessionMap = new Map(); + const ccSessions = createCcSessionStore(); + const mockModel = { id: "mock-claude-code-model" }; + + vi.mocked(claudeCode).mockReturnValue( + mockModel as unknown as ReturnType<typeof claudeCode>, + ); + vi.mocked(streamText).mockReturnValue(createMockStreamResult(["Hello"])); + + const handler = createMessageHandler({ + channelMap, + sessions, + ccSessions, + model: "sonnet", + }); + + const { args } = createMockBoltArgs("C123", "brand new thread", { + ts: "9999.0001", + }); + await handler(args); + + // Should not include resume option + expect(claudeCode).toHaveBeenCalledWith("sonnet", { + cwd: "/tmp/test-project", + permissionMode: "bypassPermissions", + }); + }); + + it("/project set updates channel map and responds", async () => { + const channelMap: ChannelProjectMap = new Map(); + const sessions: SessionMap = new Map(); + const ccSessions = createCcSessionStore(); + + const handler = createMessageHandler({ + channelMap, + sessions, + ccSessions, + }); + + const { args, say } = createMockBoltArgs( + "C123", + "/project set /home/user/new-project", + ); + await handler(args); + + // Channel map should be updated + expect(channelMap.get("C123")).toBe("/home/user/new-project"); + + // Should respond with confirmation + expect(say).toHaveBeenCalledWith( + expect.objectContaining({ + text: expect.stringContaining("/home/user/new-project"), + }), + ); + + // Should NOT call streamText (slash command short-circuits) + expect(streamText).not.toHaveBeenCalled(); + }); +}); + +describe("splitAtParagraphs", () => { + it("splits text at double newlines", () => { + 
expect(splitAtParagraphs("a\n\nb\n\nc")).toEqual(["a", "b", "c"]); + }); + + it("returns single element for text without paragraph breaks", () => { + expect(splitAtParagraphs("single line")).toEqual(["single line"]); + }); + + it("handles multiple consecutive newlines", () => { + expect(splitAtParagraphs("a\n\n\n\nb")).toEqual(["a", "b"]); + }); + + it("filters empty chunks", () => { + expect(splitAtParagraphs("\n\na\n\n\n\nb\n\n")).toEqual(["a", "b"]); + }); +}); diff --git a/tests/slack-bot/index.test.ts b/tests/slack-bot/index.test.ts new file mode 100644 index 00000000..d907d28c --- /dev/null +++ b/tests/slack-bot/index.test.ts @@ -0,0 +1,45 @@ +import { describe, expect, it, vi } from "vitest"; + +// Mock @slack/bolt before importing +vi.mock("@slack/bolt", () => ({ + App: vi.fn().mockImplementation(() => ({ + message: vi.fn(), + start: vi.fn().mockResolvedValue(undefined), + })), +})); + +import { App } from "@slack/bolt"; +import { createSlackBoltApp } from "../../src/slack-bot/index.js"; +import { loadSlackBotConfig } from "../../src/slack-bot/server.js"; + +describe("createSlackBoltApp", () => { + it("constructs App with socketMode: true", () => { + const channelMap = new Map([["C123", "/tmp/project"]]); + + createSlackBoltApp({ + botToken: "xoxb-test", + appToken: "xapp-test", + channelMap, + }); + + expect(App).toHaveBeenCalledWith({ + token: "xoxb-test", + appToken: "xapp-test", + socketMode: true, + }); + }); +}); + +describe("env var validation", () => { + it("throws clear error when SLACK_APP_TOKEN is missing", () => { + expect(() => loadSlackBotConfig({ SLACK_BOT_TOKEN: "xoxb-test" })).toThrow( + "SLACK_APP_TOKEN", + ); + }); + + it("throws clear error when SLACK_BOT_TOKEN is missing", () => { + expect(() => loadSlackBotConfig({ SLACK_APP_TOKEN: "xapp-test" })).toThrow( + "SLACK_BOT_TOKEN", + ); + }); +}); diff --git a/tests/slack-bot/reactions.test.ts b/tests/slack-bot/reactions.test.ts new file mode 100644 index 00000000..33372b5f --- /dev/null 
+++ b/tests/slack-bot/reactions.test.ts @@ -0,0 +1,91 @@ +import { describe, expect, it, vi } from "vitest"; + +import { + markError, + markProcessing, + markSuccess, + markWarning, +} from "../../src/reactions.js"; + +/** Create a mock WebClient with reactions.add and reactions.remove. */ +function createMockClient() { + return { + reactions: { + add: vi.fn().mockResolvedValue({ ok: true }), + remove: vi.fn().mockResolvedValue({ ok: true }), + }, + }; +} + +describe("markProcessing", () => { + it('adds "eyes" reaction', async () => { + const client = createMockClient(); + + await markProcessing(client as never, "C123", "1234.5678"); + + expect(client.reactions.add).toHaveBeenCalledWith({ + channel: "C123", + timestamp: "1234.5678", + name: "eyes", + }); + expect(client.reactions.add).toHaveBeenCalledTimes(1); + expect(client.reactions.remove).not.toHaveBeenCalled(); + }); +}); + +describe("markSuccess", () => { + it('removes "eyes" and adds "white_check_mark"', async () => { + const client = createMockClient(); + + await markSuccess(client as never, "C123", "1234.5678"); + + expect(client.reactions.remove).toHaveBeenCalledWith({ + channel: "C123", + timestamp: "1234.5678", + name: "eyes", + }); + expect(client.reactions.add).toHaveBeenCalledWith({ + channel: "C123", + timestamp: "1234.5678", + name: "white_check_mark", + }); + }); +}); + +describe("markError", () => { + it('removes "eyes" and adds "x"', async () => { + const client = createMockClient(); + + await markError(client as never, "C123", "1234.5678"); + + expect(client.reactions.remove).toHaveBeenCalledWith({ + channel: "C123", + timestamp: "1234.5678", + name: "eyes", + }); + expect(client.reactions.add).toHaveBeenCalledWith({ + channel: "C123", + timestamp: "1234.5678", + name: "x", + }); + }); +}); + +describe("markWarning", () => { + it('removes "eyes" and adds "warning"', async () => { + const client = createMockClient(); + + await markWarning(client as never, "C123", "1234.5678"); + + 
expect(client.reactions.remove).toHaveBeenCalledWith({ + channel: "C123", + timestamp: "1234.5678", + name: "eyes", + }); + expect(client.reactions.add).toHaveBeenCalledWith({ + channel: "C123", + timestamp: "1234.5678", + name: "warning", + }); + }); +}); diff --git a/tests/slack-bot/server.test.ts b/tests/slack-bot/server.test.ts new file mode 100644 index 00000000..a123f94b --- /dev/null +++ b/tests/slack-bot/server.test.ts @@ -0,0 +1,57 @@ +import { describe, expect, it } from "vitest"; + +import { loadSlackBotConfig } from "../../src/slack-bot/server.js"; + +describe("loadSlackBotConfig", () => { + it("exits with error when required env vars are missing", () => { + expect(() => loadSlackBotConfig({})).toThrow(); + }); + + it("names the missing variable SLACK_BOT_TOKEN", () => { + expect(() => loadSlackBotConfig({ SLACK_APP_TOKEN: "xapp-token" })).toThrow( + "SLACK_BOT_TOKEN", + ); + }); + + it("names the missing variable SLACK_APP_TOKEN", () => { + expect(() => loadSlackBotConfig({ SLACK_BOT_TOKEN: "xoxb-token" })).toThrow( + "SLACK_APP_TOKEN", + ); + }); + + it("parses channel project map from JSON", () => { + const config = loadSlackBotConfig({ + SLACK_BOT_TOKEN: "xoxb-test", + SLACK_APP_TOKEN: "xapp-test", + CHANNEL_PROJECT_MAP: '{"C123":"/tmp/project-a"}', + }); + expect(config.channelMap).toBeInstanceOf(Map); + expect(config.channelMap.get("C123")).toBe("/tmp/project-a"); + }); + + it("empty channel map when CHANNEL_PROJECT_MAP is not set", () => { + const config = loadSlackBotConfig({ + SLACK_BOT_TOKEN: "xoxb-test", + SLACK_APP_TOKEN: "xapp-test", + }); + expect(config.channelMap).toBeInstanceOf(Map); + expect(config.channelMap.size).toBe(0); + }); + + it("includes CLAUDE_MODEL when set", () => { + const config = loadSlackBotConfig({ + SLACK_BOT_TOKEN: "xoxb-test", + SLACK_APP_TOKEN: "xapp-test", + CLAUDE_MODEL: "opus", + }); + expect(config.model).toBe("opus"); + }); + + it("omits model when CLAUDE_MODEL is not set", () => { + const config = 
loadSlackBotConfig({ + SLACK_BOT_TOKEN: "xoxb-test", + SLACK_APP_TOKEN: "xapp-test", + }); + expect(config.model).toBeUndefined(); + }); +}); diff --git a/tests/slack-bot/stream-consumer.test.ts b/tests/slack-bot/stream-consumer.test.ts new file mode 100644 index 00000000..14d72d67 --- /dev/null +++ b/tests/slack-bot/stream-consumer.test.ts @@ -0,0 +1,211 @@ +import { describe, expect, it, vi } from "vitest"; + +import { SLACK_MAX_CHARS } from "../../src/chunking.js"; +import { StreamConsumer } from "../../src/slack-bot/stream-consumer.js"; + +/** Create a mock WebClient with chatStream support. */ +function createMockClient() { + const mockStreamer = { + append: vi.fn().mockResolvedValue(undefined), + stop: vi.fn().mockResolvedValue(undefined), + }; + + const client = { + chatStream: vi.fn().mockReturnValue(mockStreamer), + }; + + return { client, mockStreamer }; +} + +describe("StreamConsumer", () => { + it("creates stream lazily on first append", async () => { + const { client, mockStreamer } = createMockClient(); + + const consumer = new StreamConsumer( + client as never, + "C123", + "1234.5678", + "U456", + "T789", + ); + + // No stream created yet + expect(client.chatStream).not.toHaveBeenCalled(); + + await consumer.append("Hello"); + + // Now stream should be created + expect(client.chatStream).toHaveBeenCalledTimes(1); + expect(client.chatStream).toHaveBeenCalledWith({ + channel: "C123", + thread_ts: "1234.5678", + recipient_user_id: "U456", + recipient_team_id: "T789", + }); + + // Text should be appended + expect(mockStreamer.append).toHaveBeenCalledWith({ + markdown_text: "Hello", + }); + }); + + it("finish is a no-op when no stream was started", async () => { + const { client } = createMockClient(); + + const consumer = new StreamConsumer( + client as never, + "C123", + "1234.5678", + "U456", + "T789", + ); + + // Should not throw + await consumer.finish(); + expect(client.chatStream).not.toHaveBeenCalled(); + }); + + it("finish stops the current 
stream", async () => { + const { client, mockStreamer } = createMockClient(); + + const consumer = new StreamConsumer( + client as never, + "C123", + "1234.5678", + "U456", + "T789", + ); + + await consumer.append("Hello"); + await consumer.finish(); + + expect(mockStreamer.stop).toHaveBeenCalledTimes(1); + }); + + it("handles overflow by starting a new stream at 39K boundary", async () => { + const streamers = [ + { + append: vi.fn().mockResolvedValue(undefined), + stop: vi.fn().mockResolvedValue(undefined), + }, + { + append: vi.fn().mockResolvedValue(undefined), + stop: vi.fn().mockResolvedValue(undefined), + }, + ]; + let streamIndex = 0; + + const client = { + chatStream: vi.fn().mockImplementation(() => { + const s = streamers[streamIndex]; + streamIndex++; + return s; + }), + }; + + const consumer = new StreamConsumer( + client as never, + "C123", + "1234.5678", + "U456", + "T789", + ); + + // Append text that's just under the limit + const nearLimit = "x".repeat(SLACK_MAX_CHARS - 100); + await consumer.append(nearLimit); + + expect(client.chatStream).toHaveBeenCalledTimes(1); + + // Append text that pushes over the limit + const overflow = "y".repeat(200); + await consumer.append(overflow); + + // Should have created a second stream + expect(client.chatStream).toHaveBeenCalledTimes(2); + + // First stream should have been stopped + expect(streamers[0]!.stop).toHaveBeenCalledTimes(1); + + // Second stream should have the overflow text + expect(streamers[1]!.append).toHaveBeenCalledWith({ + markdown_text: overflow, + }); + + await consumer.finish(); + expect(streamers[1]!.stop).toHaveBeenCalledTimes(1); + }); + + it("handles undefined teamId", async () => { + const { client } = createMockClient(); + + const consumer = new StreamConsumer( + client as never, + "C123", + "1234.5678", + "U456", + undefined, + ); + + await consumer.append("Hello"); + + expect(client.chatStream).toHaveBeenCalledWith({ + channel: "C123", + thread_ts: "1234.5678", + 
recipient_user_id: "U456", + }); + }); + + it("suppresses errors from stop during cleanup", async () => { + const mockStreamer = { + append: vi.fn().mockResolvedValue(undefined), + stop: vi.fn().mockRejectedValue(new Error("stream already stopped")), + }; + + const client = { + chatStream: vi.fn().mockReturnValue(mockStreamer), + }; + + const consumer = new StreamConsumer( + client as never, + "C123", + "1234.5678", + "U456", + "T789", + ); + + await consumer.append("Hello"); + + // Should not throw even though stop() rejects + await consumer.finish(); + }); + + it("appends multiple chunks to the same stream within limit", async () => { + const { client, mockStreamer } = createMockClient(); + + const consumer = new StreamConsumer( + client as never, + "C123", + "1234.5678", + "U456", + "T789", + ); + + await consumer.append("Hello "); + await consumer.append("world"); + + // Only one stream created + expect(client.chatStream).toHaveBeenCalledTimes(1); + + // Two appends + expect(mockStreamer.append).toHaveBeenCalledTimes(2); + expect(mockStreamer.append).toHaveBeenNthCalledWith(1, { + markdown_text: "Hello ", + }); + expect(mockStreamer.append).toHaveBeenNthCalledWith(2, { + markdown_text: "world", + }); + + await consumer.finish(); + }); +}); diff --git a/tests/test-alpha.test.ts b/tests/test-alpha.test.ts new file mode 100644 index 00000000..2d508a90 --- /dev/null +++ b/tests/test-alpha.test.ts @@ -0,0 +1,13 @@ +import { describe, expect, it } from "vitest"; + +import { TEST_ALPHA_READY, greet } from "../src/test-alpha.js"; + +describe("test-alpha", () => { + it("module is ready", () => { + expect(TEST_ALPHA_READY).toBe(true); + }); + + it("greet returns expected string", () => { + expect(greet("Symphony")).toBe("Hello, Symphony!"); + }); +}); diff --git a/tests/version.test.ts b/tests/version.test.ts new file mode 100644 index 00000000..8f820d35 --- /dev/null +++ b/tests/version.test.ts @@ -0,0 +1,35 @@ +import { createRequire } from "node:module"; +import { 
beforeEach, describe, expect, it } from "vitest"; + +import { + VERSION, + _resetGitShaCache, + getDisplayVersion, +} from "../src/version.js"; + +const require = createRequire(import.meta.url); + +describe("version module", () => { + beforeEach(() => { + _resetGitShaCache(); + }); + + it("VERSION matches package.json", () => { + const pkg = require("../package.json") as { version: string }; + expect(VERSION).toBe(pkg.version); + }); + + it("display version includes git SHA", () => { + const display = getDisplayVersion(); + // In a git repo, should be VERSION+7-char-hex + expect(display).toMatch( + new RegExp(`^${VERSION.replace(/\./g, "\\.")}\\+[0-9a-f]{7}$`), + ); + }); + + it("caches git SHA across calls", () => { + const first = getDisplayVersion(); + const second = getDisplayVersion(); + expect(first).toBe(second); + }); +}); diff --git a/tests/workspace/workspace-manager.test.ts b/tests/workspace/workspace-manager.test.ts index 2dc8a6bb..c257e4b1 100644 --- a/tests/workspace/workspace-manager.test.ts +++ b/tests/workspace/workspace-manager.test.ts @@ -2,10 +2,11 @@ import { mkdtemp, writeFile } from "node:fs/promises"; import { tmpdir } from "node:os"; import { join } from "node:path"; -import { afterEach, describe, expect, it } from "vitest"; +import { afterEach, describe, expect, it, vi } from "vitest"; import { ERROR_CODES } from "../../src/errors/codes.js"; import { + AsyncMutex, WorkspaceHookRunner, WorkspaceManager, type WorkspacePathError, @@ -131,6 +132,179 @@ describe("WorkspaceManager", () => { createdNow: true, }); }); + + it("serialises afterCreate hook calls for workspaces under the same root", async () => { + const root = await createRoot(); + const execOrder: string[] = []; + + const hooks = new WorkspaceHookRunner({ + config: { + afterCreate: "prepare", + beforeRun: null, + afterRun: null, + beforeRemove: null, + timeoutMs: 5_000, + }, + execute: async (_script, options) => { + execOrder.push(options.cwd); + if (execOrder.length === 1) { + // 
Pause to let the second caller queue up behind the mutex. + await new Promise<void>((r) => setTimeout(r, 20)); + } + return { exitCode: 0, signal: null, stdout: "", stderr: "" }; + }, + }); + + const manager = new WorkspaceManager({ root, hooks }); + + // Start both creations concurrently. + const [w1, w2] = await Promise.all([ + manager.createForIssue("issue-aaa"), + manager.createForIssue("issue-bbb"), + ]); + + // Both workspaces should have been created. + expect(w1.createdNow).toBe(true); + expect(w2.createdNow).toBe(true); + + // The two afterCreate hooks must have run one after the other. + // The exact ordering is not guaranteed, but the array must contain + // exactly two distinct paths. + expect(execOrder).toHaveLength(2); + expect(new Set(execOrder).size).toBe(2); + }); + + it("does not block removeForIssue while afterCreate hook is running", async () => { + const root = await createRoot(); + let hookRunning = false; + let removeCalledWhileHookRunning = false; + + const hooks = new WorkspaceHookRunner({ + config: { + afterCreate: "prepare", + beforeRun: null, + afterRun: null, + beforeRemove: null, + timeoutMs: 5_000, + }, + execute: async (_script, _options) => { + hookRunning = true; + // Give removeForIssue a chance to run while this hook is "executing". + await new Promise<void>((r) => setTimeout(r, 20)); + hookRunning = false; + return { exitCode: 0, signal: null, stdout: "", stderr: "" }; + }, + }); + + const manager = new WorkspaceManager({ root, hooks }); + + const createPromise = manager.createForIssue("issue-123"); + + // Poll briefly until the hook has started. + await new Promise<void>((r) => setTimeout(r, 5)); + + // removeForIssue should proceed without waiting for the mutex. + const removePromise = manager.removeForIssue("issue-123").then((result) => { + removeCalledWhileHookRunning = hookRunning; + return result; + }); + + await Promise.all([createPromise, removePromise]); + + // Remove ran while the hook was still executing (i.e. 
was not blocked). + expect(removeCalledWhileHookRunning).toBe(true); + }); +}); + +describe("AsyncMutex", () => { + it("allows the first caller to acquire immediately", async () => { + const mutex = new AsyncMutex(); + expect(mutex.depth).toBe(0); + + const release = await mutex.acquire(); + expect(mutex.depth).toBe(1); + + release(); + expect(mutex.depth).toBe(0); + }); + + it("queues a second caller until the first releases", async () => { + const mutex = new AsyncMutex(); + const order: string[] = []; + + const release1 = await mutex.acquire(); + order.push("acquired-1"); + + // Start second acquire – it should not resolve until release1() is called. + const p2 = mutex.acquire().then((release2) => { + order.push("acquired-2"); + release2(); + }); + + // Depth should now be 2 (one holder + one waiter). + expect(mutex.depth).toBe(2); + + release1(); + await p2; + + expect(order).toEqual(["acquired-1", "acquired-2"]); + expect(mutex.depth).toBe(0); + }); + + it("reports depth accurately across multiple waiters", async () => { + const mutex = new AsyncMutex(); + + const r1 = await mutex.acquire(); + const p2 = mutex.acquire(); + const p3 = mutex.acquire(); + + expect(mutex.depth).toBe(3); + + r1(); + const r2 = await p2; + expect(mutex.depth).toBe(2); + + r2(); + const r3 = await p3; + expect(mutex.depth).toBe(1); + + r3(); + expect(mutex.depth).toBe(0); + }); + + it("logs queue depth when creation is queued behind another", async () => { + const root = await createRoot(); + const logSpy = vi.spyOn(console, "log").mockImplementation(() => {}); + + const hooks = new WorkspaceHookRunner({ + config: { + afterCreate: "prepare", + beforeRun: null, + afterRun: null, + beforeRemove: null, + timeoutMs: 5_000, + }, + execute: async (_script, _options) => { + await new Promise<void>((r) => setTimeout(r, 30)); + return { exitCode: 0, signal: null, stdout: "", stderr: "" }; + }, + }); + + const manager = new WorkspaceManager({ root, hooks }); + + await Promise.all([ + 
manager.createForIssue("issue-aaa"), + manager.createForIssue("issue-bbb"), + ]); + + // At least one call should mention "queued". + const queuedLogs = logSpy.mock.calls.filter((args) => + String(args[0]).includes("queued"), + ); + expect(queuedLogs.length).toBeGreaterThanOrEqual(1); + + logSpy.mockRestore(); + }); }); async function createRoot(): Promise<string> { diff --git a/workpad.md b/workpad.md new file mode 100644 index 00000000..9d9a21c5 --- /dev/null +++ b/workpad.md @@ -0,0 +1,33 @@ +## Workpad +**Environment**: pro14:/Users/ericlitman/intent/workspaces/architecture-build/repo/symphony-ts@73532bb + +### Plan + +- [x] Add `analyze` subcommand to `ops/symphony-ctl` + - [x] Accept optional JSONL path (default: most recent `/tmp/symphony-logs-*/symphony.jsonl`) + - [x] Parse `stage_completed` events for per-issue/per-stage summaries + - [x] Parse `turn_completed` events for per-turn granularity + - [x] Output formatted text report: run summary, per-issue table, per-stage averages, cache efficiency, outliers + - [x] Support `--json` flag for machine-readable output + - [x] Handle missing fields gracefully (older logs) + - [x] Use only standard unix tools (jq, awk, sort) — no extra dependencies + +### Acceptance Criteria + +- [x] `symphony-ctl analyze <path>` prints a formatted text report +- [x] `symphony-ctl analyze --json <path>` outputs machine-readable JSON +- [x] Default path uses most recent `/tmp/symphony-logs-*/symphony.jsonl` +- [x] Missing fields produce zero/unknown gracefully +- [x] No new npm dependencies added +- [x] Full test suite: 435 passed, 3 skipped, 0 failed + +### Validation +- Bash syntax check passed: `bash -n ops/symphony-ctl` +- Text output verified with 4-stage test log including outliers +- Empty/missing-field logs handled gracefully +- Default path detection picks most recently modified log +- TypeScript: `npx tsc --noEmit` → exit 0 +- Tests: `pnpm test` → 435 passed, 3 skipped, 0 failed + +### Notes +- 2026-03-21 SYMPH-28 
implementation complete. PR opened.