diff --git a/.codex/hooks.json b/.codex/hooks.json index 9468d875a..8976a14aa 100644 --- a/.codex/hooks.json +++ b/.codex/hooks.json @@ -26,33 +26,21 @@ ] } ], - "PostToolUse": [ - { - "id": "posttooluse-detected-npm-run-lint", - "command": "npm run lint", - "description": "Run lightweight repository validation after edits (Node.js)", - "blocking": false, - "timeout": 120000, - "sdks": [ - "*" - ] - } - ], "PrePush": [ { - "id": "prepush-git-diff-check", - "command": "git diff --check", - "description": "Check tracked changes for whitespace and conflict-marker issues before push", + "id": "prepush-go-vet", + "command": "go vet ./...", + "description": "Run go vet before push", "blocking": true, - "timeout": 30000, + "timeout": 120000, "sdks": [ "*" ] }, { - "id": "prepush-detected-npm-run-prepush-check", - "command": "npm run prepush:check", - "description": "Run detected repository quality gate before push (Node.js)", + "id": "prepush-go-build", + "command": "go build ./...", + "description": "Verify Go build succeeds before push", "blocking": true, "timeout": 300000, "sdks": [ @@ -76,6 +64,6 @@ "meta": { "profile": "balanced", "generatedBy": "bosun setup", - "generatedAt": "2026-03-28T22:29:00.000Z" + "generatedAt": "2026-03-27T06:21:21.749Z" } } diff --git a/.continue/environments.json b/.continue/environments.json deleted file mode 100644 index 9e10eea8c..000000000 --- a/.continue/environments.json +++ /dev/null @@ -1,7 +0,0 @@ -[ - { - "install": "npm install", - "start": "npm start", - "resourceSize": "medium" - } -] diff --git a/.env.example b/.env.example index 258099243..4f1292894 100644 --- a/.env.example +++ b/.env.example @@ -91,8 +91,6 @@ TELEGRAM_MINIAPP_ENABLED=false # BOSUN_UI_BROWSER_OPEN_MODE=manual # Legacy auto-open toggle for UI server (requires BOSUN_UI_BROWSER_OPEN_MODE=auto) # BOSUN_UI_AUTO_OPEN_BROWSER=false -# Daemon startup keeps browser auto-open disabled unless this is explicitly true. 
-# BOSUN_UI_AUTO_OPEN_ON_DAEMON=false # Show full /?token=... browser URL in logs (default: false; token is hidden) # BOSUN_UI_LOG_TOKENIZED_BROWSER_URL=false # Setup wizard browser auto-open (default: true when mode=auto) @@ -1020,17 +1018,6 @@ COPILOT_CLOUD_DISABLED=true # WORKFLOW_RECOVERY_BACKOFF_MAX_MS=60000 # Random jitter ratio (0.0-0.9) applied to backoff to prevent retry storms. # WORKFLOW_RECOVERY_BACKOFF_JITTER_RATIO=0.2 -# Delay startup workflow recovery actions so the daemon can settle before -# resuming interrupted runs or firing schedule/task-poll recovery. -# WORKFLOW_RECOVERY_STARTUP_GRACE_MS=30000 -# Additional delay inserted between each startup recovery action. -# WORKFLOW_RECOVERY_STARTUP_STEP_DELAY_MS=15000 - -# Bosun MCP policy: by default Bosun-launched agents only receive validated -# library-managed MCP servers, with required auth pulled from environment. -# BOSUN_MCP_REQUIRE_AUTH=true -# BOSUN_MCP_ALLOW_EXTERNAL_SOURCES=false -# BOSUN_MCP_ALLOW_DEFAULT_SERVERS=false # ─── GitHub Issue Reconciler ───────────────────────────────────────────────── # Periodically reconciles open GitHub issues against open/merged PRs. 
diff --git a/.githooks/pre-commit b/.githooks/pre-commit index bf4d3c585..7ead0d021 100755 --- a/.githooks/pre-commit +++ b/.githooks/pre-commit @@ -22,7 +22,6 @@ cd "$ROOT" echo "[hooks] bosun pre-commit checks" npm run syntax:check npm run prompt:lint -node tools/prepublish-check.mjs MISSING_SUMMARIES="$(node --input-type=module -e "import { scanRepository } from './lib/codebase-audit.mjs'; const scan = scanRepository(process.cwd(), { staged: true, dryRun: true }); const missing = scan.files.filter((file) => !file.hasSummary).map((file) => file.path); if (missing.length > 0) { console.log(missing.join('\\n')); }" 2>/dev/null || true)" if [ -n "$MISSING_SUMMARIES" ]; then diff --git a/.githooks/pre-push b/.githooks/pre-push index d95c31e59..e485e4a8e 100755 --- a/.githooks/pre-push +++ b/.githooks/pre-push @@ -63,12 +63,12 @@ npm run smoke:packed-cli # Format: "source_prefix|test_glob_1|test_glob_2|..." # Globs are matched against tests/*.test.mjs filenames (basename only). ADJACENCY_MAP=( - "workflow/|workflow-*|workflow-task-lifecycle*|workflow-write-file-encoding*|workflow-pipeline-primitives*|manual-flows*|mcp-workflow-adapter*|bosun-native-workflow-nodes*|meeting-workflow*|run-evaluator*|webhook-gateway*|credential-store*|cron-scheduler*" + "workflow/|workflow-*|workflow-task-lifecycle*|workflow-pipeline-primitives*|manual-flows*|mcp-workflow-adapter*|bosun-native-workflow-nodes*|meeting-workflow*|run-evaluator*|webhook-gateway*|credential-store*|cron-scheduler*" "workflow-templates/|workflow-templates*|workflow-new-templates*|workflow-engine*|manual-flows*" "task/|task-*|workflow-task-lifecycle*|kanban-*|ve-orchestrator*|vk-api*|ve-kanban*" "kanban/|kanban-*|task-store*|task-claims*|ve-kanban*|ve-orchestrator*|vk-api*" - "workspace/|workspace-*|shared-state*|worktree-*|worktree-recovery-regression*|sync-engine*" - 
"infra/|monitor-*|heartbeat-monitor*|daemon-*|restart-*|startup-*|maintenance-*|anomaly-*|preflight*|tracing*|tui-bridge*|windows-hidden-child-processes*|weekly-agent-work-report*|workflow-task-lifecycle*|workflow-engine*" + "workspace/|workspace-*|shared-state*|worktree-*|sync-engine*" + "infra/|monitor-*|daemon-*|restart-*|startup-*|maintenance-*|anomaly-*|preflight*|tracing*|tui-bridge*|weekly-agent-work-report*|workflow-task-lifecycle*|workflow-engine*" "agent/|agent-*|primary-agent*|fleet-*|review-agent*|analyze-agent*|autofix*|streaming-agent*|hook-library*|weekly-agent-work-report*" "bench/|bench-swebench*|benchmark-*|task-*" "config/|config-*|workspace-health*|bosun-skills*|codex-config*" @@ -77,7 +77,7 @@ ADJACENCY_MAP=( "tui/|tui-*|task-*|tui-events*|tui-bridge*|ui-server-tui-events*" "ui/|ui-*|tui-*|stream-timeline*|workflow-canvas-utils*|static-relative*|mui-theme*|tab-swipe*|session-theme*|session-history*|demo-*|fleet-tab*|portal-ui-smoke*" "site/|ui-*|stream-timeline*|static-relative*|demo-*|portal-ui-smoke*" - "server/|ui-server*|setup-web*|bosun-mcp-server*|tunnel-*|ui-realtime*|presence*|portal-ui-smoke*|guardrails*" + "server/|ui-server*|setup-web*|bosun-mcp-server*|tunnel-*|ui-realtime*|presence*|portal-ui-smoke*" "github/|github-*|hook-profiles*" "git/|git-*|branch-*|diff-stats*|conflict-*" "shell/|*-shell*|codex-shell*|gemini-shell*|opencode-shell*|sdk-shell*|continue-detection*" diff --git a/.github/copilot-instructions.md b/.github/copilot-instructions.md deleted file mode 100644 index f2fd9b595..000000000 --- a/.github/copilot-instructions.md +++ /dev/null @@ -1,106 +0,0 @@ -# Bosun Copilot Instructions - -Bosun is a Node.js ESM control plane for autonomous software engineering. It ships a CLI, setup wizard, long-running monitor, workflow engine, task/workspace orchestration, GitHub/Jira integrations, Telegram and WhatsApp channels, a browser Mini App, an Electron desktop shell, and a static docs/marketing site. 
The repo is large and multi-surface; most changes should stay inside one module plus its direct tests. - -## Start Here - -- Always read `AGENTS.md` at the repo root first, then the closest module `AGENTS.md` before editing. -- Prefer Node 24 locally to match CI. Docs say Node 18+ works; this repo was validated here with Node `v24.11.1` and npm `11.6.2` on Windows. -- Always run `npm install` before any build or test command. `postinstall.mjs` applies compatibility checks/shims, may install `desktop/` dependencies, and auto-installs `.githooks` when possible. -- Keep the worktree clean before trusting failures. This repo has string/snapshot-style guard tests, so unrelated local edits can make `npm test` fail in ways that do not reflect your change. -- On Windows, prefer PowerShell for npm/node commands. If you must run `.githooks/pre-push` directly in a worktree, use Git for Windows bash instead of WSL bash. - -## Verified Command Order - -Bootstrap: - -- `npm install` - Verified success. Always do this first. - -Fast local validation: - -- `npm run syntax:check` - Verified success in ~2s. This uses `tools/syntax-check.mjs`, rejects browser-served modules with top-level `await`, and validates local import/export bindings for browser `.js` and `.mjs` files under `ui/` and `site/ui/`. -- `npm run prompt:lint` - Verified success in <1s. Run this whenever you touch prompts, hooks, agent instructions, or `.bosun/agents` content. -- `node cli.mjs --help` - Verified success. Use this for a cheap CLI sanity check. - -Build and packaging validation: - -- `node tools/prepublish-check.mjs` or `npm run prepublishOnly` - Verified via `npm run prepublishOnly` in ~26s. This is important: if you add a new published runtime file imported by shipped code, you must also add it to `package.json` `files`, or this check fails. -- `npm run build` - Verified success in <1s. This is a vendor sync/build step, not a TypeScript compile. -- `npm run build:docs` - Verified success in <1s. 
Run this when changing `_docs/`, docs generation, or the site docs pipeline. - -Tests: - -- `npm test` - Runs Vitest only, and `pretest` already runs `npm run syntax:check` first. In this workspace it took ~185s but failed because the worktree already contained unrelated edits under `infra/monitor.mjs`; do not assume that specific failure is caused by your change. -- `npm run test:node` - Verified success in ~17s. Run this for `*.node.test.mjs`, portal smoke, and Node-runtime behaviors. -- `npm run test:all` - Use this when your change can affect both Vitest suites and Node test suites. -- Focused example that passed here: - `npm test -- tests/config-validation.test.mjs tests/workflow-templates-e2e.test.mjs -- -t "template-bosun-pr-watchdog installs, executes, and returns valid context"` - Took ~170s. Some workflow-template tests print expected stderr when external creds like `GITHUB_PERSONAL_ACCESS_TOKEN` or `EXA_API_KEY` are absent. -- `npm run check:native-call-parity` - Verified success in ~4s. CI runs this separately; do not skip it for voice/native-call changes. -- `npm run audit:ci` - Verified success but expensive here (~16 min). Run it when touching codebase-audit behavior, CLAUDE summaries/manifests, or when CI has `BOSUN_AUDIT_CI=1` enabled. - -Run commands: - -- `npm run site:serve` - Verified success; served the site locally at `http://127.0.0.1:4173`. -- `npm run setup` starts the web setup wizard. `npm start` runs `node cli.mjs --config-dir .bosun --repo-root . --no-update-check` and expects repo-local config/state. - -## CI / Hook Gates To Mirror - -- Main CI is `.github/workflows/ci.yaml` on Node 24. It runs: `npm ci`, `node tools/prepublish-check.mjs`, `npm run prompt:lint`, `npm run smoke:packed-cli`, `npm run build`, optional `npm run audit:ci`, `npm run check:native-call-parity`, then `npm test`. -- Local pre-commit runs `npm run syntax:check` and `npm run prompt:lint`, then warns if staged source files are missing `CLAUDE:SUMMARY` annotations. 
-- Local pre-push always runs syntax, prepublish check, and packed CLI smoke, then chooses targeted tests or the full suite based on changed files. -- If you add a new module or test file, update `ADJACENCY_MAP` in `.githooks/pre-push` so smart pre-push routing can still find the right tests. -- Site deploy uses Node 20 and resolves the local `site/ui` symlink into a real copy before publishing; remember that GitHub Pages will not follow symlinks. -- Hosted demo fixes often require touching both `ui/` and `site/ui/`. If a site tab imports a helper from `site/ui/tabs/`, make sure the file actually exists there rather than only in `ui/tabs/`. - -## High-Signal Layout - -- `cli.mjs`: main CLI/router and first-run behavior. -- `setup.mjs`: interactive/non-interactive setup, env/config generation, hook scaffolding. -- `infra/`: monitor loop, restart/recovery, runtime services. -- `workflow/`: engine, node registry, migration, workflow APIs. -- `workflow-templates/`: built-in reusable templates and pipeline helpers. -- `task/`: task execution, claims, archiving, CLI. -- `workspace/`: workspaces, worktrees, shared state, context indexing. -- `agent/`: agent pool, prompts, hooks, reports, fleet coordination. -- `shell/`: Codex/Copilot/Claude/OpenCode executor integrations. -- `server/` and `ui/`: Mini App backend/frontend and setup UI. -- `github/`, `kanban/`, `telegram/`, `voice/`: integrations. -- `site/`: public website and generated docs output. -- `tools/`: syntax, docs build, prepublish, vendor sync, hook utilities. -- `tests/`: Vitest and Node suites; `vitest.config.mjs` excludes `*.node.test.mjs` from `npm test`. - -Important config files: - -- `package.json`: scripts, exports, publishable `files` list. -- `.env.example`: required/optional environment variables and security-sensitive defaults. -- `bosun.config.example.json` and `bosun.schema.json`: config shape references. -- `vitest.config.mjs`, `playwright.config.mjs`, `stryker.config.mjs`: test tooling. 
-- `.github/workflows/*.yml`: CI, publish, Docker, site deploy, mutation testing, PR automation. -- `.github/hooks/bosun.hooks.json`: Copilot hook bridge integration. - -## Change Routing - -- Config/setup changes: run `npm test -- tests/*config*.test.mjs tests/*setup*.test.mjs`. -- Workflow engine/template changes: run `npm test -- tests/workflow-*.test.mjs tests/workflow-templates*.test.mjs`. -- Workspace/shared-state changes: run `npm test -- tests/workspace-*.test.mjs tests/worktree-*.test.mjs tests/shared-state*.test.mjs`. -- Task changes: run `npm test -- tests/task-*.test.mjs tests/*task*.test.mjs`. -- Server/UI/setup changes: run `npm test -- tests/*ui*.test.mjs tests/*setup*.test.mjs` and `npm run test:node` when portal smoke/runtime is involved. -- Infra/monitor/restart changes: run `npm test -- tests/*monitor*.test.mjs tests/*restart*.test.mjs`. - -## Do Not Re-discover This - -- Trust this file first. Only search the repo when these instructions are incomplete, contradicted by the nearest `AGENTS.md`, or proven wrong by a command/result in the current worktree. 
diff --git a/.github/hooks/bosun.hooks.json b/.github/hooks/bosun.hooks.json index 9cb1365be..c644cd61b 100644 --- a/.github/hooks/bosun.hooks.json +++ b/.github/hooks/bosun.hooks.json @@ -5,7 +5,7 @@ "type": "command", "command": [ "node", - "agent/agent-hook-bridge.mjs", + "agent/agent-hook-bridge.mjs", "--agent", "copilot", "--event", @@ -19,7 +19,7 @@ "type": "command", "command": [ "node", - "agent/agent-hook-bridge.mjs", + "agent/agent-hook-bridge.mjs", "--agent", "copilot", "--event", @@ -34,7 +34,7 @@ "type": "command", "command": [ "node", - "agent/agent-hook-bridge.mjs", + "agent/agent-hook-bridge.mjs", "--agent", "copilot", "--event", @@ -50,7 +50,7 @@ "type": "command", "command": [ "node", - "agent/agent-hook-bridge.mjs", + "agent/agent-hook-bridge.mjs", "--agent", "copilot", "--event", diff --git a/.gitignore b/.gitignore index 83ad90035..d68c17fdc 100644 --- a/.gitignore +++ b/.gitignore @@ -63,13 +63,4 @@ reports/mutation/ .tmp-* .bosun-monitor/backups/* -tmp/* - -node-compile-cache/* -*/*.tmp -*/*.log -*.log -*.tmp -{{*}} -*tmp* -*null* +tmp/* diff --git a/.vscode/launch.json b/.vscode/launch.json index ea5d0a052..91f47fcd3 100644 --- a/.vscode/launch.json +++ b/.vscode/launch.json @@ -16,108 +16,6 @@ "uriFormat": "%s", "action": "openExternally" } - }, - { - "name": "Debug Bosun CLI", - "type": "node", - "request": "launch", - "program": "${workspaceFolder}/cli.mjs", - "cwd": "${workspaceFolder}", - "args": [ - "--config-dir", - "${workspaceFolder}/.bosun", - "--repo-root", - "${workspaceFolder}", - "--no-update-check", - "--no-auto-update" - ], - "env": { - "BOSUN_DIR": "${workspaceFolder}/.bosun", - "REPO_ROOT": "${workspaceFolder}", - "BOSUN_SKIP_UPDATE_CHECK": 
"1", - "BOSUN_SKIP_AUTO_UPDATE": "1" - }, - "console": "integratedTerminal", - "internalConsoleOptions": "neverOpen", - "smartStep": true, - "autoAttachChildProcesses": true, - "skipFiles": [ - "/**" - ] - }, - { - "name": "Debug Bosun Monitor Direct", - "type": "node", - "request": "launch", - "program": "${workspaceFolder}/infra/monitor.mjs", - "cwd": "${workspaceFolder}", - "env": { - "BOSUN_DIR": "${workspaceFolder}/.bosun", - "REPO_ROOT": "${workspaceFolder}", - "BOSUN_SKIP_UPDATE_CHECK": "1", - "BOSUN_SKIP_AUTO_UPDATE": "1" - }, - "console": "integratedTerminal", - "internalConsoleOptions": "neverOpen", - "smartStep": true, - "skipFiles": [ - "/**" - ] - }, - { - "name": "Debug Bosun Daemon Child (foreground)", - "type": "node", - "request": "launch", - "program": "${workspaceFolder}/cli.mjs", - "cwd": "${workspaceFolder}", - "args": [ - "--daemon-child", - "--config-dir", - "${workspaceFolder}/.bosun", - "--repo-root", - "${workspaceFolder}", - "--no-update-check", - "--no-auto-update" - ], - "env": { - "BOSUN_DAEMON": "1", - "BOSUN_DIR": "${workspaceFolder}/.bosun", - "REPO_ROOT": "${workspaceFolder}", - "BOSUN_SKIP_UPDATE_CHECK": "1", - "BOSUN_SKIP_AUTO_UPDATE": "1", - "BOSUN_UI_AUTO_OPEN_ON_DAEMON": "false" - }, - "console": "integratedTerminal", - "internalConsoleOptions": "neverOpen", - "smartStep": true, - "autoAttachChildProcesses": true, - "skipFiles": [ - "/**" - ] - }, - { - "name": "Attach to Bosun CLI Startup (9229)", - "type": "node", - "request": "attach", - "address": "127.0.0.1", - "port": 9229, - "restart": true, - "preLaunchTask": "Bosun: Start CLI with Inspector (9229)", - "skipFiles": [ - "/**" - ] - }, - { - "name": "Attach to Bosun Monitor Startup (9230)", - "type": "node", - "request": "attach", - "address": "127.0.0.1", - "port": 9230, - "restart": true, - "preLaunchTask": "Bosun: Start Monitor with Inspector (9230)", - "skipFiles": [ - "/**" - ] } ] } diff --git a/.vscode/mcp.json b/.vscode/mcp.json index da39e4ffa..3df453335 100644 
--- a/.vscode/mcp.json +++ b/.vscode/mcp.json @@ -1,3 +1,31 @@ { - "mcpServers": {} + "mcpServers": { + "context7": { + "command": "npx", + "args": [ + "-y", + "@upstash/context7-mcp" + ], + "startup_timeout_sec": 120 + }, + "sequential-thinking": { + "command": "npx", + "args": [ + "-y", + "@modelcontextprotocol/server-sequential-thinking" + ], + "startup_timeout_sec": 120 + }, + "playwright": { + "command": "npx", + "args": [ + "-y", + "@playwright/mcp@latest" + ], + "startup_timeout_sec": 120 + }, + "microsoft-docs": { + "url": "https://learn.microsoft.com/api/mcp" + } + } } diff --git a/.vscode/tasks.json b/.vscode/tasks.json index fe5118a6e..df348902e 100644 --- a/.vscode/tasks.json +++ b/.vscode/tasks.json @@ -26,84 +26,6 @@ } } }, - { - "label": "Bosun: Start CLI with Inspector (9229)", - "type": "shell", - "command": "node --inspect-brk=9229 cli.mjs --config-dir .bosun --repo-root . --no-update-check --no-auto-update", - "options": { - "cwd": "${workspaceFolder}" - }, - "isBackground": true, - "presentation": { - "reveal": "always", - "panel": "dedicated", - "clear": true - }, - "problemMatcher": { - "owner": "custom", - "pattern": { - "regexp": ".+" - }, - "background": { - "activeOnStart": true, - "beginsPattern": ".", - "endsPattern": "^Debugger listening on ws://127\\.0\\.0\\.1:9229/" - } - } - }, - { - "label": "Bosun: Start Monitor with Inspector (9230)", - "type": "shell", - "command": "node --inspect-brk=9230 infra/monitor.mjs", - "options": { - "cwd": "${workspaceFolder}" - }, - "isBackground": true, - "presentation": { - "reveal": "always", - "panel": "dedicated", - "clear": true - }, - "problemMatcher": { - "owner": "custom", - "pattern": { - "regexp": ".+" - }, - "background": { - "activeOnStart": true, - "beginsPattern": ".", - "endsPattern": "^Debugger listening on ws://127\\.0\\.0\\.1:9230/" - } - } - }, - { - "label": "Bosun: Terminate Runtime", - "type": "shell", - "command": "node cli.mjs --terminate", - "options": { - "cwd": 
"${workspaceFolder}" - }, - "presentation": { - "reveal": "always", - "panel": "shared", - "clear": true - }, - "problemMatcher": [] - }, - { - "label": "Bosun: Daemon Status", - "type": "shell", - "command": "node cli.mjs --daemon-status", - "options": { - "cwd": "${workspaceFolder}" - }, - "presentation": { - "reveal": "always", - "panel": "shared", - "clear": true - }, - "problemMatcher": [] - }, { "label": "Bump Patch Version", "type": "shell", diff --git a/AGENTS.md b/AGENTS.md index a27856443..3ab03a0e4 100644 --- a/AGENTS.md +++ b/AGENTS.md @@ -10,11 +10,6 @@ Open the closest module `AGENTS.md` before editing. 3. Edit only that module plus directly impacted callers/tests. 4. Run targeted tests, then `npm test`, then `npm run build`. -## Pre-Push Pitfalls -- Run `npm run syntax:check` after touching any browser-served module under `ui/` or `site/ui/`. It now validates both syntax and local import/export bindings for `.js` and `.mjs` browser modules. -- If you mirror helpers between `ui/` and `site/ui/`, update both copies in the same change. The hosted demo does not inherit files from `ui/` automatically. -- If you add a new shipped runtime file, run `node tools/prepublish-check.mjs` and update `package.json#files` when needed. - ## Core Entry Points - CLI: `cli.mjs` - Setup: `setup.mjs` diff --git a/README.md b/README.md index c314f26bc..d3c76bdae 100644 --- a/README.md +++ b/README.md @@ -144,16 +144,6 @@ Set `primaryAgent` in `.bosun/bosun.config.json` or choose an executor preset du - `bosun --daemon --sentinel` starts daemon + sentinel together (recommended for unattended operation). - `bosun --terminate` is the clean reset command when you suspect stale/ghost processes. -## VS Code debugging - -Bosun now includes workspace debug entries in `.vscode/launch.json` and helper tasks in `.vscode/tasks.json`. - -- `Debug Bosun CLI` launches `cli.mjs` with the repo-local `.bosun` config and attaches the debugger to the real CLI entry path. 
-- `Debug Bosun Monitor Direct` launches `infra/monitor.mjs` directly when you want to debug monitor logic without stepping through the CLI worker bootstrap. -- `Debug Bosun Daemon Child (foreground)` runs the daemon-child path without detaching, which is useful for restart-loop and daemon-specific behavior. -- `Attach to Bosun CLI Startup (9229)` and `Attach to Bosun Monitor Startup (9230)` start Bosun under `--inspect-brk` so you can catch startup failures before normal breakpoints would bind. -- `Bosun: Terminate Runtime` is the cleanup task to use if a stale monitor/daemon is holding the lock before a debug session. - Telegram operators can pull the weekly agent work summary with `/weekly [days]` or `/report weekly [days]`. To post it automatically once per week, set `TELEGRAM_WEEKLY_REPORT_ENABLED=true` together with `TELEGRAM_WEEKLY_REPORT_DAY`, `TELEGRAM_WEEKLY_REPORT_HOUR`, and optional `TELEGRAM_WEEKLY_REPORT_DAYS`. ## Documentation diff --git a/_docs/AZURE_AGENTIC_INFRAOPS_BACKPORT_ANALYSIS.md b/_docs/AZURE_AGENTIC_INFRAOPS_BACKPORT_ANALYSIS.md deleted file mode 100644 index c5ccf5ccd..000000000 --- a/_docs/AZURE_AGENTIC_INFRAOPS_BACKPORT_ANALYSIS.md +++ /dev/null @@ -1,345 +0,0 @@ -# Azure Agentic InfraOps Back-Port Analysis - -## Purpose - -This document compares Bosun with the concepts published in the Azure Agentic InfraOps project and identifies which ideas are already present in Bosun, which are only partially present, and which would materially improve Bosun if promoted into first-class features. - -The key conclusion is straightforward: - -- Bosun already provides many of the runtime primitives that Azure Agentic InfraOps cites as inspiration. -- The main opportunity is not feature parity. -- The main opportunity is to make Bosun more opinionated by turning existing scattered patterns into an explicit operating model. 
- -## What Azure Agentic InfraOps Adds Conceptually - -From the public docs, the Azure project is organized around a stricter orchestration model with: - -- a conductor pattern that maintains the evolving execution plan -- explicit approval gates at critical transitions -- invariant validators between steps, not just at the end -- a repository-first memory model for durable state -- typed session state and resumable checkpoints -- challenger reviews as a built-in maker-checker loop -- stronger cost governance and model-tier routing -- deterministic stop conditions for long-running agent workflows - -Azure Agentic InfraOps is best understood as Bosun-style engineering patterns wrapped in a more prescriptive control framework. - -## What Bosun Already Has - -Bosun already contains strong first-class implementations for many of the underlying primitives: - -- multi-agent orchestration and supervision -- workflow DAG execution -- distributed shared state and claim-based locking -- context shredding and compression -- anomaly detection and circuit breakers -- PR automation and review gating -- prompt registries and skill loading -- workflow evidence collection and validation nodes - -High-signal existing areas: - -- `infra/monitor.mjs` -- `agent/agent-supervisor.mjs` -- `agent/agent-pool.mjs` -- `workflow/workflow-engine.mjs` -- `workflow/workflow-nodes.mjs` -- `task/task-claims.mjs` -- `workspace/shared-state-manager.mjs` -- `infra/anomaly-detector.mjs` -- `config/context-shredding-config.mjs` - -This matters because the recommendations below are largely architectural surfacing and unification work, not ground-up invention. - -## Where Bosun Is Partial Today - -Bosun has the pieces, but several of the Azure concepts are not yet expressed as a single explicit contract. - -### 1. 
Supervisor Without A True Conductor Ledger - -Bosun has orchestration and supervision, but the plan is not treated as a durable, mutable ledger that is continuously updated as subagents report back. - -Current state: - -- supervision is strong -- replanning is possible -- the plan itself is not a first-class persisted state object - -Impact: - -- less transparent progress across long-running tasks -- weaker resume semantics after interruption -- harder to reason about what changed between iterations - -### 2. Approval Is Present But Too Coarse - -Bosun already has review gates, but approval is not consistently expressed as typed gates on classes of actions. - -Current state: - -- merge and review gates exist -- workflow `action.ask_user` exists -- agent runtime often defaults to coarse approval policies - -Gap: - -- no first-class policy like `approvalRequired: ["deploy", "merge", "secrets", "prod-write"]` -- no unified pause-and-resume contract at those exact boundaries - -### 3. Validation Happens Too Late - -Bosun has strong quality gates, but many checks happen near push, review, or failure recovery time instead of between agent handoffs. - -Gap: - -- no general transition-level invariant validator layer in the workflow engine -- downstream steps can receive outputs that are structurally valid enough to continue, but not semantically trustworthy enough to compound safely - -### 4. Session State Is Not Yet A Canonical Envelope - -Bosun has task state, workflow state, session tracking, and shared state. What it does not yet have is one canonical typed session envelope that describes the current objective, current phase, approvals, retry counts, checkpoints, and completion predicates for a run. - -Gap: - -- harder crash recovery for long multi-step tasks -- weaker auditability for why a run resumed where it did -- harder interoperability between monitor, workflow, and review flows - -### 5. 
No Built-In Challenger Loop - -Bosun supports review, but it does not consistently use a built-in maker-checker cycle where one agent produces and a second agent challenges against explicit criteria before promotion. - -Gap: - -- quality control is partly reactive -- autofix and remediation loops can remain single-perspective -- review criteria are not always converted into repeated structured evaluation - -### 6. Cost Governance Is Observed More Than Enforced - -Bosun tracks budgets and timeouts, but model-tiering and spend-aware orchestration are not yet a strong first-class control surface. - -Gap: - -- limited per-run token accounting -- limited per-role model routing based on task complexity and cost -- no explicit budget-triggered compaction, downgrade, or halt policy across a whole orchestration run - -### 7. Stop Conditions Are Not Formal Enough - -Bosun has retries, cooldowns, anomaly detection, and circuit breakers. That is not the same thing as deterministic completion logic. - -Gap: - -- no general `goalSatisfied()` contract for long-running orchestration steps -- limited stall detection based on lack of meaningful state change -- limited typed fallback outcomes when iteration caps are reached - -## Highest-Leverage Improvements For Bosun - -These are the changes most worth back-porting from the Azure style of operation. - -### Priority 1: Add A First-Class Run Ledger - -Introduce a durable run ledger for complex tasks and workflows. 
- -Suggested contents: - -- objective -- current phase -- plan steps -- completed steps -- blocked steps -- approvals granted -- checkpoints -- retry counters -- evidence references -- completion predicate status - -Likely Bosun touchpoints: - -- `infra/monitor.mjs` -- `workflow/workflow-engine.mjs` -- `task/` -- `workspace/shared-state-manager.mjs` - -Outcome: - -- better crash recovery -- more reliable replanning -- clearer operator visibility -- easier subagent coordination - -### Priority 2: Add Typed Approval Gates - -Add policy-driven gates by action class instead of broad runtime approval settings. - -Examples: - -- `merge` -- `prod-deploy` -- `external-write` -- `secret-use` -- `destructive-git` - -Likely Bosun touchpoints: - -- `agent/agent-hooks.mjs` -- `workflow/workflow-nodes/actions.mjs` -- `infra/monitor.mjs` -- configuration schema and runtime config - -Outcome: - -- tighter operator control -- less friction for low-risk automation -- resumable pauses at the right boundaries - -### Priority 3: Add Handoff Validators In The Workflow Engine - -Add an explicit validation layer between agent-producing nodes and downstream consumer nodes. - -Validator types could include: - -- schema validity -- confidence threshold -- required evidence presence -- policy compliance -- semantic completeness -- contradiction or drift detection - -Likely Bosun touchpoints: - -- `workflow/workflow-engine.mjs` -- `workflow/workflow-nodes/validation.mjs` -- `workflow/workflow-contract.mjs` - -Outcome: - -- fewer error cascades -- safer multi-step automation -- better recovery semantics when a step is low quality but not technically failed - -### Priority 4: Add A Built-In Challenger Pattern - -Make maker-checker loops a reusable Bosun workflow and runtime primitive. 
- -Pattern: - -- maker agent produces output -- challenger agent reviews against explicit criteria -- result is approve, changes requested, or escalate -- iteration cap and fallback policy are mandatory - -Likely Bosun touchpoints: - -- `agent/review-agent.mjs` -- `agent/agent-supervisor.mjs` -- `agent/autofix.mjs` -- `workflow-templates/` - -Outcome: - -- stronger code review automation -- better remediation quality -- less self-confirming single-agent behavior - -### Priority 5: Formalize Deterministic Stop Conditions - -Add explicit completion and stall contracts for long-running flows. - -Examples: - -- `maxIterations` -- `goalSatisfied` -- `noStateChangeForNRounds` -- `budgetExceeded` -- `approvalTimeout` -- `escalateAfter` - -Likely Bosun touchpoints: - -- `infra/monitor.mjs` -- `agent/agent-supervisor.mjs` -- `workflow/workflow-engine.mjs` - -Outcome: - -- fewer ambiguous loops -- better operator trust -- clearer escalation behavior - -### Priority 6: Promote Cost Governance To A First-Class Policy Surface - -Extend Bosun from budget awareness into budget-based orchestration policy. - -Examples: - -- route summarization and cleanup work to cheaper models -- reserve premium models for planning, review, and high-risk tasks -- cap spend per run or per task family -- auto-compact context or downgrade model tiers when thresholds are crossed - -Likely Bosun touchpoints: - -- `agent/agent-pool.mjs` -- `agent/fleet-coordinator.mjs` -- `agent/agent-work-analyzer.mjs` -- config schema - -Outcome: - -- lower operating cost -- more predictable scaling -- better fleet-level scheduling decisions - -## Recommended Implementation Order - -If this becomes an actual Bosun improvement track, the order should be: - -1. Run ledger and resumable session envelope -2. Typed approval gates -3. Handoff validators -4. Challenger workflow template and runtime support -5. Deterministic stop conditions -6. 
Cost governance policy surface - -That sequence improves reliability first, then quality, then economics. - -## What Not To Copy Blindly - -Some Azure Agentic InfraOps patterns are domain-specific to Azure infrastructure generation and should not be copied into Bosun wholesale. - -Examples: - -- Azure-specific governance terminology -- IaC-specific approval stage count -- AVM and Well-Architected checks as Bosun core concepts - -Bosun should copy the orchestration pattern, not the infrastructure domain framing. - -## Recommended Bosun Positioning - -The clearest framing after this comparison is: - -> Azure Agentic InfraOps operationalizes several Bosun patterns for one domain. -> Bosun can improve in return by making those same patterns more explicit, durable, and policy-driven at the platform level. - -In other words, the best inspiration to take back is: - -- stronger state contracts -- stronger gate contracts -- stronger handoff validation -- stronger maker-checker loops -- stronger deterministic completion rules - -## Proposed Follow-Up Work - -If we decide to implement this, the next useful artifacts would be: - -1. a Bosun RFC for `run-ledger.json` -2. a config proposal for typed approval gates -3. a workflow-engine proposal for transition validators -4. a reusable challenger template for maker-checker flows -5. a stop-condition spec shared by monitor and workflow runtime diff --git a/agent/agent-event-bus.mjs b/agent/agent-event-bus.mjs index 5f7d10d47..91ffe491b 100644 --- a/agent/agent-event-bus.mjs +++ b/agent/agent-event-bus.mjs @@ -385,11 +385,6 @@ export class AgentEventBus { * @param {object} body — { hasCommits, branch, prUrl, prNumber, output } */ onAgentComplete(taskId, body = {}) { - const task = this._resolveTask(taskId); - const reviewStatus = String(task?.reviewStatus || "").trim().toLowerCase(); - const reviewIssueCount = Array.isArray(task?.reviewIssues) - ? 
task.reviewIssues.length - : 0; this.emit(AGENT_EVENT.AGENT_COMPLETE, taskId, { hasCommits: !!body.hasCommits, branch: body.branch || null, @@ -403,34 +398,8 @@ export class AgentEventBus { } catch { /* best-effort */ } } - if ( - body.hasCommits && - reviewStatus === "changes_requested" && - typeof this._sendTelegram === "function" - ) { - const branch = String(body.branch || task?.branchName || "").trim(); - const prNumber = Number.isFinite(Number(body.prNumber)) - ? Number(body.prNumber) - : null; - const prUrl = String(body.prUrl || task?.prUrl || "").trim(); - const title = task?.title || taskId; - const lines = [ - ":check: Review changes implemented", - `Task: ${title}`, - "Summary: Bosun completed a remediation pass and moved the task back into review.", - reviewIssueCount ? `Issues addressed: ${reviewIssueCount}` : "", - branch ? `Branch: ${branch}` : "", - prNumber ? `PR: #${prNumber}` : prUrl ? `PR: ${prUrl}` : "", - ] - .filter(Boolean) - .join("\n"); - this._sendTelegram(lines, { - dedupKey: `review-fix-complete|${taskId}|${prNumber || prUrl || branch || "unknown"}`, - exactDedup: true, - }); - } - if (this._reviewAgent) { + const task = this._resolveTask(taskId); if (task) this._triggerAutoReview(task, body); } } diff --git a/agent/agent-hooks.mjs b/agent/agent-hooks.mjs index 68497b529..50791744f 100644 --- a/agent/agent-hooks.mjs +++ b/agent/agent-hooks.mjs @@ -51,9 +51,6 @@ const MAX_OUTPUT_BYTES = 64 * 1024; /** Whether we're running on Windows */ const IS_WINDOWS = process.platform === "win32"; -/** Preferred Windows shell for hook execution. */ -const WINDOWS_SHELL = process.env.ComSpec || "cmd.exe"; - /** Default max retries for retryable hooks */ const DEFAULT_MAX_RETRIES = 2; @@ -844,42 +841,6 @@ function _buildEnv(ctx) { return env; } -function _getSpawnCommand(command) { - const trimmed = String(command ?? 
"").trim(); - - if (IS_WINDOWS) { - const lower = trimmed.toLowerCase(); - if (lower.startsWith("powershell ") || lower.startsWith("powershell.exe ")) { - const inlineCommand = trimmed - .replace(/^powershell(?:\.exe)?\s+-NoProfile\s+-Command\s+/i, "") - .replace(/^powershell(?:\.exe)?\s+-Command\s+/i, "") - .replace(/^"|"$/g, ""); - return { - file: "powershell.exe", - args: ["-NoProfile", "-Command", inlineCommand], - }; - } - if (lower.startsWith("cmd ") || lower.startsWith("cmd.exe ")) { - const inlineCommand = trimmed - .replace(/^cmd(?:\.exe)?\s+\/d\s+\/s\s+\/c\s+/i, "") - .replace(/^cmd(?:\.exe)?\s+\/c\s+/i, ""); - return { - file: WINDOWS_SHELL, - args: ["/d", "/s", "/c", inlineCommand], - }; - } - return { - file: WINDOWS_SHELL, - args: ["/d", "/s", "/c", trimmed], - }; - } - - return { - file: "/bin/sh", - args: ["-c", trimmed], - }; -} - // ── Internal: Synchronous Hook Execution ──────────────────────────────────── /** @@ -931,13 +892,12 @@ function _executeHookSync(hook, ctx, env) { }; try { - const spawnTarget = _getSpawnCommand(hook.command); - const result = spawnSync(spawnTarget.file, spawnTarget.args, { + const result = spawnSync(hook.command, { cwd, env: hookEnv, encoding: "utf8", timeout, - shell: false, + shell: true, windowsHide: true, maxBuffer: MAX_OUTPUT_BYTES, }); @@ -1039,11 +999,10 @@ function _executeHookAsyncOnce(hook, ctx, env, attempt) { let child; try { - const spawnTarget = _getSpawnCommand(hook.command); - child = spawn(spawnTarget.file, spawnTarget.args, { + child = spawn(hook.command, { cwd, env: hookEnv, - shell: false, + shell: true, windowsHide: true, stdio: ["ignore", "pipe", "pipe"], }); @@ -1294,10 +1253,3 @@ export function registerLibraryHooks(hooksByEvent) { } return { registered, skipped }; } - - - - - - - diff --git a/agent/agent-pool.mjs b/agent/agent-pool.mjs index 0fa9285aa..06e53f00f 100644 --- a/agent/agent-pool.mjs +++ b/agent/agent-pool.mjs @@ -1,8 +1,3 @@ -// CLAUDE:SUMMARY — agent-pool -// Resolves SDK 
selection and fallback order for ephemeral agent threads, -// including env/config-driven failover and launch orchestration across Codex, -// Copilot, Claude, and compatible adapters. - /** * agent-pool.mjs — Universal SDK-Aware Ephemeral Agent Pool * @@ -44,15 +39,12 @@ * getAvailableSdks() → returns list of non-disabled SDKs */ -import { randomUUID } from "node:crypto"; import { resolve, dirname } from "node:path"; import { existsSync, readFileSync } from "node:fs"; import { homedir } from "node:os"; import { fileURLToPath } from "node:url"; import { createRequire } from "node:module"; -import "../infra/windows-hidden-child-processes.mjs"; import { loadConfig } from "../config/config.mjs"; -import { resolveAgentSdkModuleEntry, resolveCodexSdkInstall } from "./agent-sdk.mjs"; import { resolveRepoRoot, resolveAgentRepoRoot } from "../config/repo-root.mjs"; import { resolveCodexProfileRuntime, readCodexConfigRuntimeDefaults } from "../shell/codex-model-profiles.mjs"; import { buildTaskWritableRoots } from "../shell/codex-config.mjs"; @@ -106,390 +98,35 @@ const HARD_TIMEOUT_BUFFER_MS = 5 * 60_000; // 5 minutes /** Tag for console logging */ const TAG = "[agent-pool]"; -const DEFAULT_AGENT_EXECUTION_MAX_PARALLEL = 3; const require = createRequire(import.meta.url); -const CODEX_SDK_SPECIFIER = "@openai/codex-sdk"; const MODULE_PRESENCE_CACHE = new Map(); -const activeAgentExecutionSlots = new Map(); -const queuedAgentExecutionSlots = []; - -function resolveCodexWindowsRuntime() { - if (process.platform !== "win32") { - return { supported: false, packageName: null, binaryPath: null }; - } - - const runtimeMap = { - x64: { - packageName: "@openai/codex-win32-x64", - binaryParts: ["vendor", "x86_64-pc-windows-msvc", "codex", "codex.exe"], - }, - arm64: { - packageName: "@openai/codex-win32-arm64", - binaryParts: ["vendor", "aarch64-pc-windows-msvc", "codex", "codex.exe"], - }, - }; - - const runtimeInfo = runtimeMap[process.arch]; - if (!runtimeInfo) { - return { 
supported: false, packageName: null, binaryPath: null }; - } - - try { - const runtimePkgJson = require.resolve(`${runtimeInfo.packageName}/package.json`); - return { - supported: true, - packageName: runtimeInfo.packageName, - binaryPath: resolve(dirname(runtimePkgJson), ...runtimeInfo.binaryParts), - }; - } catch { - return { - supported: true, - packageName: runtimeInfo.packageName, - binaryPath: null, - }; - } -} - -function getCodexRuntimePrerequisiteFailure() { - const runtime = resolveCodexWindowsRuntime(); - if (!runtime.supported) return null; - if (!runtime.binaryPath) { - return `${runtime.packageName} not installed`; - } - if (!existsSync(runtime.binaryPath)) { - return `Codex SDK runtime missing at ${runtime.binaryPath}`; - } - return null; -} - -function isDeterministicSdkFailure(errorValue) { - const message = String(errorValue || "").toLowerCase(); - if (!message) return false; - if (message.includes("failed to list models") && message.includes("400")) { - return true; - } - if (message.includes("enoent")) return true; - if (message.includes("sdk runtime missing")) return true; - if (message.includes("sdk not available")) return true; - if (message.includes("not installed")) return true; - return false; -} - -function parsePositiveInt(value) { - const parsed = Number(value); - if (!Number.isFinite(parsed) || parsed <= 0) return null; - return Math.max(1, Math.trunc(parsed)); -} - -function resolveAgentExecutionMaxParallel(explicit = null) { - const direct = parsePositiveInt(explicit); - if (direct) return direct; - const envValue = - parsePositiveInt(process.env.AGENT_POOL_MAX_PARALLEL) || - parsePositiveInt(process.env.WORKFLOW_AGENT_MAX_PARALLEL); - if (envValue) return envValue; - try { - const cfg = loadConfig(); - const configValue = - parsePositiveInt(cfg?.agentPool?.maxParallel) || - parsePositiveInt(cfg?.internalExecutor?.maxParallel); - if (configValue) return configValue; - } catch { - // Best-effort only; fall back to a safe default. 
- } - return DEFAULT_AGENT_EXECUTION_MAX_PARALLEL; -} - -function summarizeAgentSlotMeta(meta = {}) { - if (!meta || typeof meta !== "object") return {}; - const summary = {}; - const scalarKeys = [ - "taskKey", - "taskId", - "taskTitle", - "workflowRunId", - "workflowId", - "workflowName", - "workflowNodeId", - "workflowNodeLabel", - "cwd", - "sdk", - "model", - "sessionType", - ]; - for (const key of scalarKeys) { - const value = meta[key]; - if (value == null) continue; - const normalized = String(value).trim(); - if (normalized) summary[key] = normalized; - } - return summary; -} - -function emitAgentSlotHook(hook, payload) { - if (typeof hook !== "function") return; - try { - hook(payload); - } catch { - // Slot telemetry must never break agent execution. - } -} - -function buildAgentSlotSnapshot(overrides = {}) { - const maxParallel = resolveAgentExecutionMaxParallel(overrides.maxParallel); - return { - maxParallel, - activeSlots: activeAgentExecutionSlots.size, - queuedSlots: queuedAgentExecutionSlots.length, - ...overrides, - }; -} - -function grantQueuedAgentExecutionSlot(request) { - const grantedAt = Date.now(); - const lease = { - slotId: request.slotId, - ownerKey: request.ownerKey, - requestedAt: request.requestedAt, - queuedAt: request.queuedAt, - acquiredAt: grantedAt, - waitedMs: Math.max(0, grantedAt - request.requestedAt), - maxParallel: request.maxParallel, - meta: request.meta, - onReleased: request.onReleased, - }; - activeAgentExecutionSlots.set(lease.slotId, lease); - const payload = buildAgentSlotSnapshot({ - slotId: lease.slotId, - ownerKey: lease.ownerKey, - queuedAt: lease.queuedAt, - acquiredAt: lease.acquiredAt, - requestedAt: lease.requestedAt, - waitedMs: lease.waitedMs, - maxParallel: lease.maxParallel, - meta: lease.meta, - }); - emitAgentSlotHook(request.onAcquired, payload); - request.resolve(lease); -} - -function pumpQueuedAgentExecutionSlots() { - while (queuedAgentExecutionSlots.length > 0) { - const next = 
queuedAgentExecutionSlots[0]; - const maxParallel = resolveAgentExecutionMaxParallel(next.maxParallel); - if (activeAgentExecutionSlots.size >= maxParallel) break; - queuedAgentExecutionSlots.shift(); - grantQueuedAgentExecutionSlot(next); - } -} - -export function getAvailableSlots(maxParallel = null) { - return Math.max( - 0, - resolveAgentExecutionMaxParallel(maxParallel) - activeAgentExecutionSlots.size, - ); -} - -export function getAgentExecutionSlotStatus() { - return { - maxParallel: resolveAgentExecutionMaxParallel(), - activeSlots: activeAgentExecutionSlots.size, - queuedSlots: queuedAgentExecutionSlots.length, - active: Array.from(activeAgentExecutionSlots.values()).map((lease) => ({ - slotId: lease.slotId, - ownerKey: lease.ownerKey, - requestedAt: lease.requestedAt, - queuedAt: lease.queuedAt, - acquiredAt: lease.acquiredAt, - waitedMs: lease.waitedMs, - maxParallel: lease.maxParallel, - meta: { ...lease.meta }, - })), - queued: queuedAgentExecutionSlots.map((request) => ({ - slotId: request.slotId, - ownerKey: request.ownerKey, - requestedAt: request.requestedAt, - queuedAt: request.queuedAt, - maxParallel: request.maxParallel, - meta: { ...request.meta }, - })), - }; -} - -export async function allocateSlot(ownerKey = "", options = {}) { - const normalizedOwnerKey = - String( - ownerKey || - options.taskKey || - options.taskId || - options.workflowRunId || - options.workflowId || - "", - ).trim() || `agent-slot:${randomUUID()}`; - const slotId = `agent-slot-${randomUUID().slice(0, 8)}`; - const requestedAt = Date.now(); - const maxParallel = resolveAgentExecutionMaxParallel(options.maxParallel); - const meta = summarizeAgentSlotMeta({ - ...options.meta, - taskKey: options.taskKey, - taskId: options.taskId, - taskTitle: options.taskTitle, - workflowRunId: options.workflowRunId, - workflowId: options.workflowId, - workflowName: options.workflowName, - workflowNodeId: options.workflowNodeId, - workflowNodeLabel: options.workflowNodeLabel, - cwd: 
options.cwd, - sdk: options.sdk, - model: options.model, - sessionType: options.sessionType, - }); - const request = { - slotId, - ownerKey: normalizedOwnerKey, - requestedAt, - queuedAt: null, - maxParallel, - meta, - onAcquired: options.onAcquired, - onReleased: options.onReleased, - resolve: null, - reject: null, - }; - - if (activeAgentExecutionSlots.size < maxParallel) { - return await new Promise((resolve) => { - request.resolve = resolve; - grantQueuedAgentExecutionSlot(request); - }); - } - - request.queuedAt = Date.now(); - emitAgentSlotHook( - options.onQueued, - buildAgentSlotSnapshot({ - slotId, - ownerKey: normalizedOwnerKey, - requestedAt, - queuedAt: request.queuedAt, - maxParallel, - queuedSlots: queuedAgentExecutionSlots.length + 1, - meta, - queueDepth: queuedAgentExecutionSlots.length + 1, - }), - ); - return await new Promise((resolve, reject) => { - request.resolve = resolve; - request.reject = reject; - queuedAgentExecutionSlots.push(request); - }); -} - -export async function releaseSlot(slotRef = null) { - const slotId = - typeof slotRef === "string" - ? 
slotRef - : String(slotRef?.slotId || "").trim(); - if (!slotId) { - return buildAgentSlotSnapshot({ released: false, reason: "missing_slot_id" }); - } - const lease = activeAgentExecutionSlots.get(slotId); - if (!lease) { - return buildAgentSlotSnapshot({ released: false, slotId, reason: "slot_not_found" }); - } - activeAgentExecutionSlots.delete(slotId); - const releasedAt = Date.now(); - const payload = buildAgentSlotSnapshot({ - slotId, - ownerKey: lease.ownerKey, - requestedAt: lease.requestedAt, - queuedAt: lease.queuedAt, - acquiredAt: lease.acquiredAt, - releasedAt, - waitedMs: lease.waitedMs, - runDurationMs: Math.max(0, releasedAt - lease.acquiredAt), - maxParallel: lease.maxParallel, - meta: lease.meta, - released: true, - }); - emitAgentSlotHook(lease.onReleased, payload); - pumpQueuedAgentExecutionSlots(); - return payload; -} - -async function withAgentExecutionSlot(cwd, extra = {}, runner) { - if (extra?.slotLease) { - return await runner(extra.slotLease); - } - const slotOwnerKey = - String( - extra?.slotOwnerKey || - extra?.taskKey || - extra?.taskId || - extra?.workflowRunId || - extra?.workflowId || - "", - ).trim() || `agent-slot:${randomUUID()}`; - let slotLease = null; - try { - slotLease = await allocateSlot(slotOwnerKey, { - taskKey: extra?.taskKey, - taskId: extra?.taskId, - taskTitle: extra?.taskTitle, - workflowRunId: extra?.workflowRunId, - workflowId: extra?.workflowId, - workflowName: extra?.workflowName, - workflowNodeId: extra?.workflowNodeId, - workflowNodeLabel: extra?.workflowNodeLabel, - cwd, - sdk: extra?.sdk, - model: extra?.model, - sessionType: extra?.sessionType, - meta: extra?.slotMeta, - maxParallel: extra?.slotMaxParallel, - onQueued: extra?.onSlotQueued, - onAcquired: extra?.onSlotAcquired, - onReleased: extra?.onSlotReleased, - }); - return await runner(slotLease); - } finally { - if (slotLease?.slotId) { - await releaseSlot(slotLease); - } - } -} function hasOptionalModule(specifier) { if 
(MODULE_PRESENCE_CACHE.has(specifier)) { return MODULE_PRESENCE_CACHE.get(specifier); } let ok = false; - if (specifier === CODEX_SDK_SPECIFIER) { - ok = Boolean(resolveCodexSdkInstall({ extraRoots: [getAgentRepoRoot(), process.cwd()] })); - } else { + try { + require.resolve(specifier); + ok = true; + } catch { + // ESM-only packages have no CJS "require" export so require.resolve + // throws even when the package is installed. Fall back to checking + // whether the package directory exists on disk. try { - require.resolve(specifier); - ok = true; + const pkgDir = resolve(__dirname, "..", "node_modules", ...specifier.split("/")); + ok = existsSync(resolve(pkgDir, "package.json")); } catch { - ok = Boolean( - resolveAgentSdkModuleEntry(specifier, { extraRoots: [getAgentRepoRoot(), process.cwd()] }), - ); + ok = false; } } MODULE_PRESENCE_CACHE.set(specifier, ok); return ok; } - -async function importCodexSdkModule() { - return import(CODEX_SDK_SPECIFIER); -} const MAX_PROMPT_BYTES = 180_000; const MAX_SET_TIMEOUT_MS = 2_147_483_647; // Node.js setTimeout 32-bit signed max let timeoutClampWarningKey = ""; -const MIN_SILENT_STREAM_FRACTION = 0.25; -const MAX_SILENT_STREAM_GRACE_MS = 30_000; const DEFAULT_FIRST_EVENT_TIMEOUT_MS = 120_000; -const COPILOT_IDLE_TIMEOUT_GRACE_MS = 1_000; const DEFAULT_MAX_ITEMS_PER_TURN = 600; const DEFAULT_MAX_ITEM_CHARS = 12_000; const TOOL_OUTPUT_GUARDRAIL = String.raw` @@ -624,14 +261,6 @@ async function maybeCompressResultItems( } function resolveCodexStreamSafety(totalTimeoutMs) { - const minSilentStreamFraction = - typeof MIN_SILENT_STREAM_FRACTION === "number" && Number.isFinite(MIN_SILENT_STREAM_FRACTION) - ? MIN_SILENT_STREAM_FRACTION - : 0.25; - const maxSilentStreamGraceMs = - typeof MAX_SILENT_STREAM_GRACE_MS === "number" && Number.isFinite(MAX_SILENT_STREAM_GRACE_MS) - ? 
MAX_SILENT_STREAM_GRACE_MS - : 30_000; const streamCfg = getInternalExecutorStreamConfig(); const firstEventRaw = process.env.INTERNAL_EXECUTOR_STREAM_FIRST_EVENT_TIMEOUT_MS || @@ -657,17 +286,10 @@ function resolveCodexStreamSafety(totalTimeoutMs) { let firstEventTimeoutMs = null; if (Number.isFinite(budgetMs) && budgetMs > 2_000) { const maxAllowed = Math.max(1_000, budgetMs - 1_000); - const silentBudgetFloor = Math.min( - maxAllowed, - Math.max( - configuredFirstEventMs, - Math.min( - Math.trunc(budgetMs * minSilentStreamFraction), - maxSilentStreamGraceMs, - ), - ), + firstEventTimeoutMs = clampTimerDelayMs( + Math.min(configuredFirstEventMs, maxAllowed), + "first-event-timeout", ); - firstEventTimeoutMs = clampTimerDelayMs(silentBudgetFloor, "first-event-timeout"); } return { @@ -722,11 +344,6 @@ function envFlagEnabled(value) { return ["1", "true", "yes", "on", "y"].includes(raw); } -function normalizeRequestedMcpServerIds(value) { - if (!Array.isArray(value)) return []; - return [...new Set(value.map((item) => String(item || "").trim()).filter(Boolean))]; -} - function applyNodeWarningSuppressionEnv(runtimeEnv) { const nextEnv = { ...(runtimeEnv || {}) }; if (String(process.env.BOSUN_SUPPRESS_NODE_WARNINGS ?? 
"").trim() === "0") { @@ -910,9 +527,6 @@ function shouldFallbackForSdkError(error) { if (!error) return false; const message = String(error).toLowerCase(); if (!message) return false; - if (message.includes("failed to list models") && (message.includes("400") || message.includes("bad request"))) { - return true; - } if (message.includes("protocol version mismatch")) return true; if (message.includes("sdk expects version") && message.includes("server reports version")) { return true; @@ -953,7 +567,6 @@ function shouldFallbackForSdkError(error) { if (message.includes("connection refused")) return true; if (message.includes("connection reset")) return true; if (message.includes("etimedout")) return true; - if (message.includes("failed to list models")) return true; // Runtime/provider instability: fail over to next SDK immediately. if (message.includes("timeout")) return true; if (message.includes("rate limit") || message.includes("429")) return true; @@ -993,10 +606,6 @@ function hasSdkPrerequisites(name, runtimeEnv = process.env) { if (!hasOptionalModule("@openai/codex-sdk")) { return { ok: false, reason: "@openai/codex-sdk not installed" }; } - const runtimeFailure = getCodexRuntimePrerequisiteFailure(); - if (runtimeFailure) { - return { ok: false, reason: runtimeFailure }; - } // Codex auth can come from env vars, config env_key mappings, or persisted // CLI login state (for example ~/.codex/auth.json). Because login-based // auth is valid and hard to validate exhaustively, avoid false negatives. 
@@ -1190,22 +799,6 @@ function buildCodexSdkOptions(envInput = process.env, options = {}) { return false; } }; - const getAzureProviderEndpointEnvKeys = (sectionName) => { - const normalizedName = String(sectionName || "").trim().toUpperCase().replace(/[^A-Z0-9]+/g, "_"); - const keys = ["AZURE_OPENAI_ENDPOINT"]; - if (normalizedName) { - keys.push(`${normalizedName}_ENDPOINT`); - keys.push(`${normalizedName}_BASE_URL`); - if (normalizedName.startsWith("AZURE_")) { - const suffix = normalizedName.slice("AZURE_".length); - if (suffix) { - keys.push(`AZURE_${suffix}_ENDPOINT`); - keys.push(`AZURE_${suffix}_BASE_URL`); - } - } - } - return [...new Set(keys)]; - }; const isAzure = isAzureOpenAIBaseUrl(baseUrl); const env = { ...resolvedEnv }; const unsetEnvKeys = []; @@ -1249,22 +842,11 @@ function buildCodexSdkOptions(envInput = process.env, options = {}) { if (!otherEnvKey || otherEnvKey === providerEnvKey) continue; delete env[otherEnvKey]; if (!unsetEnvKeys.includes(otherEnvKey)) unsetEnvKeys.push(otherEnvKey); - for (const endpointKey of getAzureProviderEndpointEnvKeys(sectionName)) { - if (endpointKey === "AZURE_OPENAI_ENDPOINT") continue; - delete env[endpointKey]; - if (!unsetEnvKeys.includes(endpointKey)) unsetEnvKeys.push(endpointKey); - } } } catch { // best effort — if config reading fails, don't block execution } - for (const key of Object.keys(env)) { - if (!key.startsWith("AZURE_OPENAI_API_KEY") || key === providerEnvKey) continue; - delete env[key]; - if (!unsetEnvKeys.includes(key)) unsetEnvKeys.push(key); - } - return { env, unsetEnvKeys, @@ -1333,18 +915,6 @@ const SDK_ADAPTERS = { */ let SDK_FALLBACK_ORDER = ["codex", "copilot", "claude"]; -function getSdkFallbackOrder() { - const envOrder = String(process.env.BOSUN_AGENT_POOL_FALLBACK_ORDER || "").trim(); - if (envOrder) { - const parsed = envOrder - .split(",") - .map((value) => String(value || "").trim().toLowerCase()) - .filter((value, index, arr) => SDK_ADAPTERS[value] && arr.indexOf(value) === 
index); - if (parsed.length > 0) return parsed; - } - return SDK_FALLBACK_ORDER; -} - // Attempt to load custom fallback order from config try { const cfg = loadConfig(); @@ -1405,21 +975,6 @@ function shouldApplySdkCooldown(error) { if (!error) return false; const message = String(error).toLowerCase(); if (!message) return false; - - if ( - message.includes("failed to list models") - && ( - message.includes("400") - || - message.includes("bad request") - || message.includes("invalid url") - || message.includes("deployment") - || message.includes("api version") - || message.includes("/models") - ) - ) { - return false; - } if (message.includes("failed to list models")) return true; if (message.includes("protocol version mismatch")) return true; if (message.includes("sdk expects version") && message.includes("server reports version")) { @@ -1589,7 +1144,7 @@ function resolvePoolSdkName() { } // 4. Fallback chain: first non-disabled SDK - for (const name of getSdkFallbackOrder()) { + for (const name of SDK_FALLBACK_ORDER) { if (!isDisabled(name)) { resolvedSdkName = name; logResolution(name, "fallback chain"); @@ -1705,7 +1260,7 @@ async function launchCodexThread(prompt, cwd, timeoutMs, extra = {}) { // ── 1. Load the SDK ────────────────────────────────────────────────────── let CodexClass; try { - const mod = await importCodexSdkModule(); + const mod = await import("@openai/codex-sdk"); CodexClass = mod.Codex; if (!CodexClass) throw new Error("Codex export not found in SDK module"); } catch (err) { @@ -1816,17 +1371,9 @@ async function launchCodexThread(prompt, cwd, timeoutMs, extra = {}) { ? 
`${String(systemPrompt).trim()}\n\n---\n\n${prompt}` : prompt; const safePrompt = sanitizeAndBoundPrompt(`${anchoredPrompt}${TOOL_OUTPUT_GUARDRAIL}`); - const turn = await Promise.race([ - thread.runStreamed(safePrompt, { - signal: controller.signal, - }), - new Promise((_, reject) => { - hardTimer = setTimeout( - () => reject(new Error("hard_timeout")), - clampTimerDelayMs(timeoutMs + HARD_TIMEOUT_BUFFER_MS, "codex-hard-timeout"), - ); - }), - ]); + const turn = await thread.runStreamed(safePrompt, { + signal: controller.signal, + }); let finalResponse = ""; const allItems = []; @@ -1835,7 +1382,6 @@ async function launchCodexThread(prompt, cwd, timeoutMs, extra = {}) { // The soft timeout fires controller.abort() which the SDK should honor. // The hard timeout is a safety net in case the SDK iterator ignores the abort. const hardTimeoutPromise = new Promise((_, reject) => { - if (hardTimer) return; hardTimer = setTimeout( () => reject(new Error("hard_timeout")), clampTimerDelayMs(timeoutMs + HARD_TIMEOUT_BUFFER_MS, "codex-hard-timeout"), @@ -2929,7 +2475,6 @@ export async function launchEphemeralThread( timeoutMs = DEFAULT_TIMEOUT_MS, extra = {}, ) { - return await withAgentExecutionSlot(cwd, extra, async (slotLease) => { const resolvedGithubToken = await resolveGithubSessionToken(); const baseRuntimeEnv = extra?.envOverrides && typeof extra.envOverrides === "object" @@ -2939,7 +2484,6 @@ export async function launchEphemeralThread( const launchExtra = { ...extra, envOverrides: sessionEnv, - slotLease, }; // ── Resolve MCP servers for this launch ────────────────────────────────── @@ -2948,26 +2492,19 @@ export async function launchEphemeralThread( const cfg = loadConfig(); const mcpCfg = cfg.mcpServers || {}; if (mcpCfg.enabled !== false) { - const hasExplicitMcpSelection = Array.isArray(launchExtra.mcpServers); - const requestedIds = normalizeRequestedMcpServerIds(launchExtra.mcpServers); - const defaultIds = !hasExplicitMcpSelection && 
mcpCfg.allowDefaultServers === true - ? mcpCfg.defaultServers || [] - : []; + const requestedIds = launchExtra.mcpServers || []; + const defaultIds = mcpCfg.defaultServers || []; const registry = await getMcpRegistry(); let resolved = []; if (requestedIds.length || defaultIds.length) { resolved = await registry.resolveMcpServersForAgent( cwd, requestedIds, - { - defaultServers: defaultIds, - catalogOverrides: mcpCfg.catalogOverrides || {}, - requireAuth: mcpCfg.requireAuth !== false, - }, + { defaultServers: defaultIds, catalogOverrides: mcpCfg.catalogOverrides || {} }, ); } - if (resolved.length && typeof registry.wrapServersWithDiscoveryProxy === "function") { - resolved = await registry.wrapServersWithDiscoveryProxy(cwd, resolved, { + if (typeof registry.wrapServersWithDiscoveryProxy === "function") { + resolved = registry.wrapServersWithDiscoveryProxy(cwd, resolved, { enabled: mcpCfg.useDiscoveryProxy !== false, includeCustomTools: mcpCfg.includeCustomToolsInDiscoveryProxy !== false, cacheTtlMs: mcpCfg.discoveryProxyCacheTtlMs, @@ -2981,8 +2518,7 @@ export async function launchEphemeralThread( launchExtra._mcpResolved = true; } } catch (mcpErr) { - launchExtra._resolvedMcpServers = []; - console.warn(`${TAG} MCP server resolution failed (servers skipped): ${mcpErr.message}`); + console.warn(`${TAG} MCP server resolution failed (non-fatal): ${mcpErr.message}`); } // Determine the primary SDK to try @@ -2999,7 +2535,7 @@ export async function launchEphemeralThread( ? 
[primaryName] : [ primaryName, - ...getSdkFallbackOrder().filter((name) => name !== primaryName), + ...SDK_FALLBACK_ORDER.filter((name) => name !== primaryName), ]; let lastAttemptResult = null; @@ -3273,7 +2809,6 @@ export async function launchEphemeralThread( sdk: primaryName, threadId: null, }; - }); } // --------------------------------------------------------------------------- @@ -3312,7 +2847,6 @@ export async function execPooledPrompt(userMessage, options = {}) { cwd = REPO_ROOT, sdk, model, - mcpServers, sessionType = "ephemeral", forceContextShredding = false, skipContextShredding = false, @@ -3326,7 +2860,6 @@ export async function execPooledPrompt(userMessage, options = {}) { abortController, sdk, model, - mcpServers, }); if (!result.success) { @@ -3681,7 +3214,7 @@ async function resumeCodexThread(threadId, prompt, cwd, timeoutMs, extra = {}) { let CodexClass; try { - const mod = await importCodexSdkModule(); + const mod = await import("@openai/codex-sdk"); CodexClass = mod.Codex; if (!CodexClass) throw new Error("Codex export not found"); } catch (err) { @@ -3768,23 +3301,14 @@ async function resumeCodexThread(threadId, prompt, cwd, timeoutMs, extra = {}) { try { const safePrompt = sanitizeAndBoundPrompt(prompt); - const turn = await Promise.race([ - thread.runStreamed(safePrompt, { - signal: controller.signal, - }), - new Promise((_, reject) => { - hardTimer = setTimeout( - () => reject(new Error("hard_timeout")), - clampTimerDelayMs(timeoutMs + HARD_TIMEOUT_BUFFER_MS, "codex-resume-hard-timeout"), - ); - }), - ]); + const turn = await thread.runStreamed(safePrompt, { + signal: controller.signal, + }); let finalResponse = ""; const allItems = []; // Hard timeout safety net (same as launchCodexThread) const hardTimeoutPromise = new Promise((_, reject) => { - if (hardTimer) return; hardTimer = setTimeout( () => reject(new Error("hard_timeout")), clampTimerDelayMs(timeoutMs + HARD_TIMEOUT_BUFFER_MS, "codex-resume-hard-timeout"), @@ -3930,7 +3454,6 @@ 
export async function launchOrResumeThread( timeoutMs = DEFAULT_TIMEOUT_MS, extra = {}, ) { - return await withAgentExecutionSlot(cwd, extra, async (slotLease) => { await ensureThreadRegistryLoaded(); const { taskKey, ...restExtra } = extra; const resolvedGithubToken = await resolveGithubSessionToken(); @@ -3943,7 +3466,6 @@ export async function launchOrResumeThread( resolvedGithubToken, ); restExtra.envOverrides = applyNodeWarningSuppressionEnv(restExtra.envOverrides); - restExtra.slotLease = slotLease; // Pass taskKey through as steer key so SDK launchers can register active sessions restExtra.taskKey = taskKey; if (restExtra.sdk && restExtra.pinSdk === true) { @@ -4250,105 +3772,12 @@ export async function launchOrResumeThread( } return { ...result, threadId: finalThreadId, resumed: false }; - }); } // --------------------------------------------------------------------------- // Error Recovery Wrapper // --------------------------------------------------------------------------- -const RETRY_OUTPUT_PLACEHOLDERS = new Set([ - "", - "(agent completed with no text output)", - "continued", - "model response continued", -]); - -const RETRY_RECONNECT_PATTERNS = [ - /session\.idle/i, - /no events received/i, - /first_event_timeout/i, - /timeout_no_events/i, - /stream disconnection/i, - /transient stream error/i, - /transport/i, - /network/i, - /econnreset/i, - /socket hang up/i, - /connection.*closed/i, - /connection.*reset/i, - /reconnect/i, -]; - -function hasMeaningfulRetryResult(result = {}) { - const output = String(result?.output || "").replace(/\s+/g, " ").trim().toLowerCase(); - if (!RETRY_OUTPUT_PLACEHOLDERS.has(output)) return true; - if (Array.isArray(result?.items) && result.items.length > 0) return true; - return false; -} - -function normalizeRetryFailureFingerprint(result = {}) { - const errorText = String(result?.error || "").replace(/\s+/g, " ").trim().toLowerCase(); - if (!errorText) return ""; - if (RETRY_RECONNECT_PATTERNS.some((pattern) => 
pattern.test(errorText))) { - return errorText - .replace(/\bafter \d+ms\b/g, "after ") - .replace(/attempt \d+\/\d+/g, "attempt /") - .replace(/[a-f0-9]{8,}/g, "") - .slice(0, 160); - } - return ""; -} - -function classifyRetryCircuitBreak(result = {}, state = {}) { - if (result?.success) { - return { - fingerprint: "", - repeatedFingerprint: false, - noOutputFailure: false, - shouldBreak: false, - blockedReason: null, - error: null, - }; - } - - const fingerprint = normalizeRetryFailureFingerprint(result); - const repeatedFingerprint = Boolean(fingerprint) && Number(state.failureFingerprints?.get(fingerprint) || 0) >= 1; - const noOutputFailure = !hasMeaningfulRetryResult(result); - const repeatedNoOutput = noOutputFailure && Number(state.consecutiveNoOutputFailures || 0) >= 1; - - if (repeatedFingerprint) { - return { - fingerprint, - repeatedFingerprint: true, - noOutputFailure, - shouldBreak: true, - blockedReason: "blocked_by_env", - error: `Repeated reconnect fingerprint detected: ${fingerprint}`, - }; - } - - if (repeatedNoOutput) { - return { - fingerprint, - repeatedFingerprint: false, - noOutputFailure: true, - shouldBreak: true, - blockedReason: "no_output", - error: "Repeated no-output agent starts detected; stopping retries", - }; - } - - return { - fingerprint, - repeatedFingerprint: false, - noOutputFailure, - shouldBreak: false, - blockedReason: null, - error: null, - }; -} - /** * Execute a prompt with automatic error recovery via thread resume. 
* @@ -4391,16 +3820,9 @@ export async function execWithRetry(prompt, options = {}) { buildContinuePrompt, sdk, model, - mcpServers, sessionType = "task", onEvent, onAbortControllerReplaced, - slotOwnerKey, - slotMeta, - slotMaxParallel, - onSlotQueued, - onSlotAcquired, - onSlotReleased, } = options; // AbortController can be replaced on idle_continue, so track it mutably @@ -4416,8 +3838,6 @@ export async function execWithRetry(prompt, options = {}) { const totalAttempts = 1 + maxRetries; let continuesUsed = 0; let attempt = 0; - const failureFingerprints = new Map(); - let consecutiveNoOutputFailures = 0; while (attempt < totalAttempts + continuesUsed) { attempt++; @@ -4487,17 +3907,10 @@ export async function execWithRetry(prompt, options = {}) { taskKey, sdk, model, - mcpServers, sessionType, onEvent, abortController, ignoreSdkCooldown: attempt > 1, - slotOwnerKey, - slotMeta, - slotMaxParallel, - onSlotQueued, - onSlotAcquired, - onSlotReleased, }); // Check post-launch if aborted with idle_continue (race: abort fired during execution) @@ -4525,33 +3938,6 @@ export async function execWithRetry(prompt, options = {}) { continue; } - const retryCircuit = classifyRetryCircuitBreak(lastResult, { - failureFingerprints, - consecutiveNoOutputFailures, - }); - if (retryCircuit.fingerprint) { - failureFingerprints.set( - retryCircuit.fingerprint, - Number(failureFingerprints.get(retryCircuit.fingerprint) || 0) + 1, - ); - } - consecutiveNoOutputFailures = retryCircuit.noOutputFailure - ? 
consecutiveNoOutputFailures + 1 - : 0; - - if (retryCircuit.shouldBreak) { - console.warn(`${TAG} execWithRetry circuit breaker tripped for "${taskKey}": ${retryCircuit.error}`); - return { - ...lastResult, - error: retryCircuit.error, - blockedReason: retryCircuit.blockedReason, - retryCircuitBroken: true, - failureFingerprint: retryCircuit.fingerprint || null, - attempts: attempt, - continues: continuesUsed, - }; - } - // Check if we should retry if (lastResult.success) { // If caller has custom shouldRetry (e.g. "output must contain 'PASS'"), check it @@ -4566,12 +3952,6 @@ export async function execWithRetry(prompt, options = {}) { // Failed — should we retry? const retriesLeft = totalAttempts + continuesUsed - attempt; - if (isDeterministicSdkFailure(lastResult.error)) { - console.warn( - `${TAG} attempt ${attempt} hit deterministic SDK failure; retry suppressed: ${lastResult.error}`, - ); - return { ...lastResult, attempts: attempt, continues: continuesUsed }; - } if (retriesLeft > 0) { if (typeof shouldRetry === "function" && !shouldRetry(lastResult)) { // Custom predicate says don't retry @@ -4722,17 +4102,3 @@ export function getActiveThreads() { return result; } - - -export const __testables = { - shouldApplySdkCooldown, - shouldFallbackForSdkError, - hasMeaningfulRetryResult, - normalizeRetryFailureFingerprint, - classifyRetryCircuitBreak, -}; - - - - - diff --git a/agent/agent-prompt-catalog.mjs b/agent/agent-prompt-catalog.mjs index 192cf0643..5e3c8b623 100644 --- a/agent/agent-prompt-catalog.mjs +++ b/agent/agent-prompt-catalog.mjs @@ -442,19 +442,18 @@ You are running as a **Bosun-managed task agent**. Environment variables **After committing:** - If a precommit hook auto-applies additional formatting changes, add those - to a follow-up commit before finishing. + to a follow-up commit before pushing. 
- Merge any upstream changes — BOTH from the base (module) branch AND from main: \`git fetch origin && git merge origin/ --no-edit && git merge origin/main --no-edit\` - Resolve any conflicts that arise before handing off. -- Run local validation, including the repository pre-push quality gate, before handing off. -- Do not push directly. Bosun workflow automation will perform the validated push and PR lifecycle handoff. + Resolve any conflicts that arise before pushing. +- Push: \`git push --set-upstream origin {{BRANCH}}\` +- After a successful push, hand off PR lifecycle to Bosun management. - Do not run direct PR commands. {{COAUTHOR_INSTRUCTION}} **Do NOT:** -- Push branches directly from the agent session. - Bypass pre-push hooks (\`git push --no-verify\` is forbidden). - Use \`git add .\` — stage files individually. -- Wait for user confirmation before handing off lifecycle state. +- Wait for user confirmation before pushing or handing off lifecycle state. ## Agent Status Endpoint - URL: http://127.0.0.1:{{ENDPOINT_PORT}}/api/tasks/{{TASK_ID}} diff --git a/agent/agent-sdk.mjs b/agent/agent-sdk.mjs index 5b4e8b1d6..0c24ac607 100644 --- a/agent/agent-sdk.mjs +++ b/agent/agent-sdk.mjs @@ -8,11 +8,6 @@ * Capability flags: steering, subagents, vscode_tools */ -import { existsSync, readFileSync } from "node:fs"; -import { createRequire } from "node:module"; -import { dirname, resolve } from "node:path"; -import { fileURLToPath } from "node:url"; -import { resolveAgentRepoRoot, resolveRepoRoot } from "../config/repo-root.mjs"; import { readCodexConfig } from "../shell/codex-config.mjs"; const SUPPORTED_PRIMARY = new Set([ @@ -59,34 +54,6 @@ const DEFAULT_CAPABILITIES = { }; let cachedConfig = null; -const __dirname = dirname(fileURLToPath(import.meta.url)); - -const CODEX_PLATFORM_PACKAGE_MAP = Object.freeze({ - "darwin-arm64": { - packageDir: "@openai/codex-darwin-arm64", - binaryPath: "vendor/aarch64-apple-darwin/codex/codex", - }, - "darwin-x64": { - packageDir: 
"@openai/codex-darwin-x64", - binaryPath: "vendor/x86_64-apple-darwin/codex/codex", - }, - "linux-arm64": { - packageDir: "@openai/codex-linux-arm64", - binaryPath: "vendor/aarch64-unknown-linux-musl/codex/codex", - }, - "linux-x64": { - packageDir: "@openai/codex-linux-x64", - binaryPath: "vendor/x86_64-unknown-linux-musl/codex/codex", - }, - "win32-arm64": { - packageDir: "@openai/codex-win32-arm64", - binaryPath: "vendor/aarch64-pc-windows-msvc/codex/codex.exe", - }, - "win32-x64": { - packageDir: "@openai/codex-win32-x64", - binaryPath: "vendor/x86_64-pc-windows-msvc/codex/codex.exe", - }, -}); function normalizePrimary(value) { const primary = String(value || "").trim().toLowerCase(); @@ -145,106 +112,6 @@ function parseCapabilities(section) { }; } -function normalizeRootCandidate(rootDir) { - const raw = String(rootDir || "").trim(); - if (!raw) return null; - try { - const resolved = resolve(raw); - return existsSync(resolved) ? resolved : null; - } catch { - return null; - } -} - -function uniqueRoots(roots) { - const seen = new Set(); - const ordered = []; - for (const root of roots) { - const normalized = normalizeRootCandidate(root); - if (!normalized || seen.has(normalized)) continue; - seen.add(normalized); - ordered.push(normalized); - } - return ordered; -} - -function createRequireForRoot(rootDir) { - const packageJson = resolve(rootDir, "package.json"); - if (existsSync(packageJson)) { - return createRequire(packageJson); - } - return createRequire(resolve(rootDir, "__bosun_agent_sdk__.cjs")); -} - -function resolveModuleEntryFromPackageDir(packageDir) { - const packageJsonPath = resolve(packageDir, "package.json"); - if (!existsSync(packageJsonPath)) return null; - try { - const pkg = JSON.parse(readFileSync(packageJsonPath, "utf8")); - const exportRoot = pkg?.exports?.["."] ?? 
pkg?.exports; - const candidate = - (typeof exportRoot === "string" && exportRoot) || - (exportRoot && typeof exportRoot.import === "string" && exportRoot.import) || - (typeof pkg?.module === "string" && pkg.module) || - (typeof pkg?.main === "string" && pkg.main) || - null; - return candidate ? resolve(packageDir, candidate) : packageJsonPath; - } catch { - return null; - } -} - -function resolveModuleEntryFromRoot(specifier, rootDir) { - try { - return createRequireForRoot(rootDir).resolve(specifier); - } catch { - const packageDir = resolve(rootDir, "node_modules", ...specifier.split("/")); - return resolveModuleEntryFromPackageDir(packageDir); - } -} - -export function getAgentSdkModuleRoots(options = {}) { - const extraRoots = Array.isArray(options.extraRoots) ? options.extraRoots : []; - return uniqueRoots([ - ...extraRoots, - options.rootDir, - process.env.BOSUN_AGENT_REPO_ROOT, - resolveAgentRepoRoot(), - resolveRepoRoot({ cwd: process.cwd() }), - process.cwd(), - resolve(__dirname, ".."), - ]); -} - -export function resolveAgentSdkModuleEntry(specifier, options = {}) { - for (const rootDir of getAgentSdkModuleRoots(options)) { - const entryPath = resolveModuleEntryFromRoot(specifier, rootDir); - if (entryPath && existsSync(entryPath)) { - return { entryPath, rootDir }; - } - } - return null; -} - -export function hasCodexCliBinary(rootDir, options = {}) { - const platform = String(options.platform || process.platform).trim().toLowerCase(); - const arch = String(options.arch || process.arch).trim().toLowerCase(); - const platformEntry = CODEX_PLATFORM_PACKAGE_MAP[`${platform}-${arch}`]; - if (!platformEntry) return true; - const packageRoot = resolve(rootDir, "node_modules", ...platformEntry.packageDir.split("/")); - return existsSync(resolve(packageRoot, platformEntry.binaryPath)); -} - -export function resolveCodexSdkInstall(options = {}) { - for (const rootDir of getAgentSdkModuleRoots(options)) { - const entryPath = 
resolveModuleEntryFromRoot("@openai/codex-sdk", rootDir); - if (!entryPath || !existsSync(entryPath)) continue; - if (!hasCodexCliBinary(rootDir, options)) continue; - return { entryPath, rootDir }; - } - return null; -} - export function parseAgentSdkConfig(toml) { const agentSection = parseTomlSection(toml, "[agent_sdk]"); const capsSection = parseTomlSection(toml, "[agent_sdk.capabilities]"); diff --git a/agent/agent-supervisor.mjs b/agent/agent-supervisor.mjs index a6831d52a..174a83a8d 100644 --- a/agent/agent-supervisor.mjs +++ b/agent/agent-supervisor.mjs @@ -1,5 +1,3 @@ -import { addSpanEvent, recordIntervention } from "../infra/tracing.mjs"; -import { shouldBlockAgentPushes } from "../infra/guardrails.mjs"; /** * agent-supervisor.mjs — Unified Agent Health Scoring & Intervention Engine * @@ -30,6 +28,9 @@ import { shouldBlockAgentPushes } from "../infra/guardrails.mjs"; * * @module agent-supervisor */ + +import { addSpanEvent, recordIntervention } from "../infra/tracing.mjs"; + const TAG = "[agent-supervisor]"; const API_ERROR_CONTINUE_COOLDOWNS_MS = Object.freeze([ 3 * 60_000, @@ -38,15 +39,6 @@ const API_ERROR_CONTINUE_COOLDOWNS_MS = Object.freeze([ ]); const API_ERROR_RECOVERY_RESET_MS = 15 * 60_000; -function workflowOwnsPushLifecycle(context = {}) { - const repoRoot = String(context.repoRoot || context.worktreePath || process.cwd()).trim(); - try { - return shouldBlockAgentPushes(repoRoot); - } catch { - return true; - } -} - // ── Situation Types (30+ edge cases) ──────────────────────────────────────── /** @@ -184,7 +176,7 @@ const RECOVERY_PROMPTS = { [SITUATION.PLAN_STUCK]: (ctx) => `CRITICAL: You created a plan for "${ctx.taskTitle}" but stopped before implementing. ` + `This is autonomous execution — NO ONE will respond to "ready to implement?" questions. ` + - `IMPLEMENT NOW: edit files, run tests, commit with conventional commits, and hand off for workflow push. 
` + + `IMPLEMENT NOW: edit files, run tests, commit with conventional commits, and push. ` + `Do NOT create another plan. Do NOT ask for permission. Start coding immediately.`, [SITUATION.FALSE_COMPLETION]: (ctx) => @@ -193,21 +185,19 @@ const RECOVERY_PROMPTS = { `1. Make the actual code changes (edit files)\n` + `2. Run tests: go test ./...\n` + `3. Commit: git add -A && git commit -s -m "feat(scope): description"\n` + - `4. Stop after local validation and mark the run ready for Bosun-managed workflow push handoff\n` + + `4. Push: git push --set-upstream origin ${ctx.branch || ""}\n` + `Verify each step succeeded before claiming completion.`, [SITUATION.NO_COMMITS]: (ctx) => `Task "${ctx.taskTitle}" completed ${ctx.attemptCount || 0} time(s) with zero commits. ` + `Check existing progress: git log --oneline -5 && git status\n` + - `If changes exist but aren't committed, commit them and prepare workflow handoff.\n` + + `If changes exist but aren't committed, commit and push them.\n` + `If no changes exist, implement the task requirements fully before completing.`, [SITUATION.COMMITS_NOT_PUSHED]: (ctx) => - `You made commits for "${ctx.taskTitle}" and direct agent pushes are disabled.\n` + - `Do NOT run git push. Instead:\n` + - `1. Run the local validation expected by the repository, including the pre-push quality gate\n` + - `2. Resolve any failures locally\n` + - `3. Mark the run ready for Bosun-managed workflow push and PR lifecycle handoff.`, + `You made commits for "${ctx.taskTitle}" but never pushed them. 
Run:\n` + + `git push --set-upstream origin ${ctx.branch || "$(git branch --show-current)"}\n` + + `If push fails due to pre-push hooks, fix the issues and push again.`, [SITUATION.PR_NOT_CREATED]: (ctx) => `You pushed commits for "${ctx.taskTitle}" but no PR is visible yet.\n` + @@ -460,7 +450,7 @@ export class AgentSupervisor { assess(taskId, context = {}) { const state = this._ensureTaskState(taskId); const signals = this._gatherSignals(taskId, context); - const situation = this._diagnose(signals, context) ?? SITUATION.HEALTHY; + const situation = this._diagnose(signals, context); const healthScore = this._computeHealthScore(signals); const recoveryOverride = this._selectRecoveryIntervention(taskId, situation, context, state); const attemptIndex = Math.min( @@ -607,52 +597,11 @@ export class AgentSupervisor { case INTERVENTION.DISPATCH_FIX: { const state = this._getTaskState(taskId); - const issueCount = Array.isArray(state?.reviewIssues) - ? state.reviewIssues.length - : 0; - let dispatchResult = null; if (this._dispatchFixTask && state?.reviewIssues?.length) { - dispatchResult = this._dispatchFixTask(taskId, state.reviewIssues); - if (!dispatchResult || typeof dispatchResult !== "object") { - dispatchResult = { - dispatched: true, - mode: "dispatch_fix_task", - issueCount, - }; - } + this._dispatchFixTask(taskId, state.reviewIssues); } else if (this._injectPrompt && prompt) { // Fallback: inject fix prompt into current session this._injectPrompt(taskId, prompt); - dispatchResult = { - dispatched: true, - mode: "inject_prompt", - issueCount, - }; - } - if (dispatchResult?.dispatched && this._sendTelegram) { - const task = this._resolveTask(taskId); - const title = task?.title || taskId; - const modeLabel = - dispatchResult.mode === "active_session" - ? "active session" - : dispatchResult.mode === "redispatch" - ? "new remediation session" - : dispatchResult.mode === "inject_prompt" - ? 
"prompt injection" - : "review remediation"; - const message = [ - ":construction: Review follow-up started", - `Task: ${title}`, - "Summary: Bosun is implementing the requested review changes.", - issueCount ? `Issues: ${issueCount}` : "", - `Mode: ${modeLabel}`, - ] - .filter(Boolean) - .join("\n"); - this._sendTelegram(message, { - dedupKey: `review-fix-start|${taskId}|${dispatchResult.mode || "unknown"}|${issueCount}`, - exactDedup: true, - }); } break; } @@ -1282,7 +1231,6 @@ export class AgentSupervisor { if (context.hasCommits && !context.prUrl && !context.prNumber) { // Has commits but no PR const isPushed = context.isPushed ?? true; // assume pushed unless told otherwise - if (!isPushed && workflowOwnsPushLifecycle(context)) return null; if (!isPushed) return SITUATION.COMMITS_NOT_PUSHED; return SITUATION.PR_NOT_CREATED; } diff --git a/agent/agent-tool-config.mjs b/agent/agent-tool-config.mjs index d440f06dd..888f041c1 100644 --- a/agent/agent-tool-config.mjs +++ b/agent/agent-tool-config.mjs @@ -17,16 +17,6 @@ * "defaults": { * "builtinTools": [...], // default tool list for all agents * "updatedAt": "..." 
- * }, - * "toolOverhead": { - * "": { - * "total": 12345, // total serialized chars across tool defs - * "bySource": { - * "builtin": 3456, - * "github": 8889 - * }, - * "updatedAt": "2026-01-01T00:00:00.000Z" - * } * } * } * @@ -38,12 +28,10 @@ * setAgentToolConfig(rootDir, agentId, config) — update config for one agent * getEffectiveTools(rootDir, agentId) — compute final enabled tools list * listAvailableTools(rootDir) — list all available tools (builtin + MCP) - * measureToolDefinitionChars(toolDefs) — char counts for serialized tool defs - * getToolOverheadReport(rootDir, agentId) — persisted/runtime overhead summary */ import { existsSync, readFileSync, writeFileSync, mkdirSync } from "node:fs"; -import { basename, resolve } from "node:path"; +import { resolve } from "node:path"; import { homedir } from "node:os"; // ── Constants ───────────────────────────────────────────────────────────────── @@ -63,36 +51,6 @@ function getBosunHome() { * Default built-in tools available to all voice agents and executors. * Maps to common capabilities that voice/agent sessions can invoke. */ -function measureToolDefinitionCharsInternal(toolDefs = []) { - const tools = Array.isArray(toolDefs) - ? toolDefs.map((toolDef, index) => { - const serialized = JSON.stringify(toolDef ?? 
null); - const fallbackId = `tool-${index + 1}`; - return { - id: String(toolDef?.id || toolDef?.name || toolDef?.tool_name || fallbackId), - chars: serialized.length, - }; - }) - : []; - - return { - total: tools.reduce((sum, tool) => sum + tool.chars, 0), - tools, - }; -} - -export function measureToolDefinitionChars(toolDefs = []) { - return measureToolDefinitionCharsInternal(toolDefs); -} - -function normalizeSourceCharMap(bySource = {}) { - return Object.fromEntries( - Object.entries(bySource) - .map(([source, chars]) => [String(source), Math.max(0, Number(chars) || 0)]) - .sort((left, right) => right[1] - left[1] || left[0].localeCompare(right[0])), - ); -} - export const DEFAULT_BUILTIN_TOOLS = Object.freeze([ { id: "search-files", @@ -219,11 +177,7 @@ export const DEFAULT_BUILTIN_TOOLS = Object.freeze([ // ── Config File I/O ─────────────────────────────────────────────────────────── function getConfigPath(rootDir) { - const baseDir = rootDir || getBosunHome(); - const configDir = basename(resolve(baseDir)) === ".bosun" - ? resolve(baseDir) - : resolve(baseDir, ".bosun"); - return resolve(configDir, CONFIG_FILE); + return resolve(rootDir || getBosunHome(), ".bosun", CONFIG_FILE); } /** @@ -240,7 +194,6 @@ export function loadToolConfig(rootDir) { builtinTools: DEFAULT_BUILTIN_TOOLS.filter((t) => t.default).map((t) => t.id), updatedAt: new Date().toISOString(), }, - toolOverhead: {}, }; } try { @@ -252,7 +205,6 @@ export function loadToolConfig(rootDir) { builtinTools: DEFAULT_BUILTIN_TOOLS.filter((t) => t.default).map((t) => t.id), updatedAt: new Date().toISOString(), }, - toolOverhead: parsed.toolOverhead || {}, }; } catch { return { @@ -261,7 +213,6 @@ export function loadToolConfig(rootDir) { builtinTools: DEFAULT_BUILTIN_TOOLS.filter((t) => t.default).map((t) => t.id), updatedAt: new Date().toISOString(), }, - toolOverhead: {}, }; } } @@ -269,7 +220,7 @@ export function loadToolConfig(rootDir) { /** * Save the full tool configuration. 
* @param {string} rootDir - * @param {{ agents: Object, defaults: Object, toolOverhead?: Object }} config + * @param {{ agents: Object, defaults: Object }} config */ export function saveToolConfig(rootDir, config) { const configPath = getConfigPath(rootDir); @@ -385,66 +336,3 @@ export async function listAvailableTools(rootDir) { })), }; } - -function persistToolOverheadReport(rootDir, agentId, report) { - if (!rootDir || !agentId || !report) return report; - const cfg = loadToolConfig(rootDir); - cfg.toolOverhead ||= {}; - cfg.toolOverhead[agentId] = { - total: Math.max(0, Number(report.total) || 0), - bySource: normalizeSourceCharMap(report.bySource), - updatedAt: new Date().toISOString(), - }; - saveToolConfig(rootDir, cfg); - return cfg.toolOverhead[agentId]; -} - -export function getToolOverheadReport(rootDir, agentId) { - const cfg = loadToolConfig(rootDir); - const stored = cfg.toolOverhead?.[agentId] || null; - if (!stored) return { total: 0, bySource: {} }; - return { - total: Math.max(0, Number(stored.total) || 0), - bySource: normalizeSourceCharMap(stored.bySource), - }; -} - -export async function refreshToolOverheadReport(rootDir, agentId, options = {}) { - const builtinMeasurement = measureToolDefinitionChars(DEFAULT_BUILTIN_TOOLS); - const report = { - total: builtinMeasurement.total, - bySource: { builtin: builtinMeasurement.total }, - }; - - const agentConfig = agentId ? getAgentToolConfig(rootDir, agentId) : { enabledMcpServers: [] }; - const enabledServerIds = Array.isArray(options.serverIds) - ? options.serverIds - : Array.isArray(agentConfig.enabledMcpServers) - ? agentConfig.enabledMcpServers - : []; - - if (enabledServerIds.length > 0) { - try { - const { resolveMcpServersForAgent } = await import("../workflow/mcp-registry.mjs"); - const servers = await resolveMcpServersForAgent(rootDir, enabledServerIds); - for (const server of servers) { - const serverDefs = Array.isArray(server?.tools) ? 
server.tools : []; - const measurement = measureToolDefinitionChars(serverDefs); - report.bySource[server.id || server.name || "unknown-mcp"] = measurement.total; - report.total += measurement.total; - } - } catch { - for (const serverId of enabledServerIds) report.bySource[serverId] ||= 0; - } - } - - const persisted = persistToolOverheadReport(rootDir, agentId, report); - if (process.env.BOSUN_LOG_TOOL_OVERHEAD === "1") { - console.log(TAG + " tool definition overhead for " + (agentId || "agent") + ":"); - for (const [source, chars] of Object.entries(persisted.bySource)) { - console.log(TAG + " " + source + ": " + chars + " chars"); - } - console.log(TAG + " total: " + persisted.total + " chars"); - } - return persisted; -} diff --git a/agent/autofix.mjs b/agent/autofix.mjs index 101276fd6..0158a08cd 100644 --- a/agent/autofix.mjs +++ b/agent/autofix.mjs @@ -585,7 +585,6 @@ export function runCodexExec( child = spawn(codexBin, args, { ...spawnOptions, shell: false, - windowsHide: true, }); } catch (err) { return promiseResolve({ diff --git a/agent/bosun-skills.mjs b/agent/bosun-skills.mjs index 36e35c9bf..027bd8a23 100644 --- a/agent/bosun-skills.mjs +++ b/agent/bosun-skills.mjs @@ -1,3 +1,4 @@ +#!/usr/bin/env node /** * bosun-skills — Agent Skills Knowledge-Base * diff --git a/agent/hook-library.mjs b/agent/hook-library.mjs index 34416277b..5b816177a 100644 --- a/agent/hook-library.mjs +++ b/agent/hook-library.mjs @@ -204,7 +204,7 @@ const BUILTIN_HOOKS = [ events: "PostToolUse", command: shellCmd( `bash -c 'mkdir -p .bosun && echo "{\\"ts\\":$(date +%s),\\"sdk\\":\\"$VE_SDK\\",\\"task\\":\\"$VE_TASK_ID\\"}" > .bosun/session-heartbeat.json'`, - `powershell -NoProfile -Command "New-Item -ItemType Directory -Force -Path .bosun | Out-Null; @{ts=[int](Get-Date -UFormat %s);sdk=$env:VE_SDK;task=$env:VE_TASK_ID} | ConvertTo-Json | Set-Content -Encoding UTF8 .bosun/session-heartbeat.json"`, + `powershell -NoProfile -Command "New-Item -ItemType Directory -Force -Path 
.bosun | Out-Null; @{ts=[int](Get-Date -UFormat %s);sdk=$env:VE_SDK;task=$env:VE_TASK_ID} | ConvertTo-Json | Set-Content .bosun/session-heartbeat.json"`, ), blocking: false, timeout: 5_000, @@ -224,7 +224,7 @@ const BUILTIN_HOOKS = [ events: "SessionStart", command: shellCmd( `bash -c 'mkdir -p .bosun && echo "{\\"started\\":$(date +%s),\\"sdk\\":\\"$VE_SDK\\",\\"task\\":\\"$VE_TASK_ID\\",\\"branch\\":\\"$VE_BRANCH_NAME\\",\\"status\\":\\"active\\"}" > .bosun/session-state.json'`, - `powershell -NoProfile -Command "New-Item -ItemType Directory -Force -Path .bosun | Out-Null; @{started=[int](Get-Date -UFormat %s);sdk=$env:VE_SDK;task=$env:VE_TASK_ID;branch=$env:VE_BRANCH_NAME;status='active'} | ConvertTo-Json | Set-Content -Encoding UTF8 .bosun/session-state.json"`, + `powershell -NoProfile -Command "New-Item -ItemType Directory -Force -Path .bosun | Out-Null; @{started=[int](Get-Date -UFormat %s);sdk=$env:VE_SDK;task=$env:VE_TASK_ID;branch=$env:VE_BRANCH_NAME;status='active'} | ConvertTo-Json | Set-Content .bosun/session-state.json"`, ), blocking: false, timeout: 5_000, @@ -245,7 +245,7 @@ const BUILTIN_HOOKS = [ events: "SessionStop", command: shellCmd( `bash -c 'if [ -f .bosun/session-state.json ]; then TMP=$(cat .bosun/session-state.json); echo "$TMP" | sed "s/\\"status\\":\\"active\\"/\\"status\\":\\"completed\\"/" > .bosun/session-state.json; fi'`, - `powershell -NoProfile -Command "if (Test-Path .bosun/session-state.json) { $j = Get-Content .bosun/session-state.json | ConvertFrom-Json; $j.status = 'completed'; $j | ConvertTo-Json | Set-Content -Encoding UTF8 .bosun/session-state.json }"`, + `powershell -NoProfile -Command "if (Test-Path .bosun/session-state.json) { $j = Get-Content .bosun/session-state.json | ConvertFrom-Json; $j.status = 'completed'; $j | ConvertTo-Json | Set-Content .bosun/session-state.json }"`, ), blocking: false, timeout: 5_000, @@ -266,7 +266,7 @@ const BUILTIN_HOOKS = [ events: "PostToolUse", command: shellCmd( `bash -c 'mkdir -p 
.bosun && echo "{\\"ts\\":$(date +%s),\\"tool\\":\\"$VE_HOOK_TOOL_NAME\\",\\"sdk\\":\\"$VE_SDK\\",\\"task\\":\\"$VE_TASK_ID\\"}" >> .bosun/tool-activity.jsonl'`, - `powershell -NoProfile -Command "New-Item -ItemType Directory -Force -Path .bosun | Out-Null; @{ts=[int](Get-Date -UFormat %s);tool=$env:VE_HOOK_TOOL_NAME;sdk=$env:VE_SDK;task=$env:VE_TASK_ID} | ConvertTo-Json -Compress | Add-Content -Encoding UTF8 .bosun/tool-activity.jsonl"`, + `powershell -NoProfile -Command "New-Item -ItemType Directory -Force -Path .bosun | Out-Null; @{ts=[int](Get-Date -UFormat %s);tool=$env:VE_HOOK_TOOL_NAME;sdk=$env:VE_SDK;task=$env:VE_TASK_ID} | ConvertTo-Json -Compress | Add-Content .bosun/tool-activity.jsonl"`, ), blocking: false, timeout: 3_000, @@ -307,7 +307,7 @@ const BUILTIN_HOOKS = [ events: "TaskComplete", command: shellCmd( `bash -c 'mkdir -p .bosun && echo "{\\"task\\":\\"$VE_TASK_ID\\",\\"status\\":\\"completed\\",\\"ts\\":$(date +%s),\\"branch\\":\\"$VE_BRANCH_NAME\\"}" > .bosun/task-result.json && echo "OK: task result recorded"'`, - `powershell -NoProfile -Command "New-Item -ItemType Directory -Force -Path .bosun | Out-Null; @{task=$env:VE_TASK_ID;status='completed';ts=[int](Get-Date -UFormat %s);branch=$env:VE_BRANCH_NAME} | ConvertTo-Json | Set-Content -Encoding UTF8 .bosun/task-result.json; Write-Host 'OK: task result recorded'"`, + `powershell -NoProfile -Command "New-Item -ItemType Directory -Force -Path .bosun | Out-Null; @{task=$env:VE_TASK_ID;status='completed';ts=[int](Get-Date -UFormat %s);branch=$env:VE_BRANCH_NAME} | ConvertTo-Json | Set-Content .bosun/task-result.json; Write-Host 'OK: task result recorded'"`, ), blocking: false, timeout: 5_000, @@ -362,25 +362,6 @@ const BUILTIN_HOOKS = [ tags: ["safety", "git", "branch-protection", "blocking"], }, - { - id: "safety-block-agent-direct-push", - name: "Block Agent Direct Push", - description: "Prevents agents from running git push directly when Bosun guardrails require workflow-owned push handoff.", - 
category: "safety", - events: "PreToolUse", - command: shellCmd( - `bash -c 'CMD="$VE_HOOK_COMMAND"; if echo "$CMD" | grep -qiE "git\\s+push\\b"; then node -e "const fs=require(\"fs\");const path=require(\"path\");let block=true;try{const policyPath=path.join(process.cwd(),\".bosun\",\"guardrails.json\");if(fs.existsSync(policyPath)){const policy=JSON.parse(fs.readFileSync(policyPath,\"utf8\"));block=policy?.push?.blockAgentPushes!==false;}}catch{} if(block){console.error(\"BLOCKED: Direct agent pushes are disabled. Commit your changes and let Bosun workflow automation perform the validated push.\");process.exit(1);}"; fi'`, - `powershell -NoProfile -Command "if ($env:VE_HOOK_COMMAND -match 'git\\s+push\\b') { node -e 'const fs=require(\"fs\");const path=require(\"path\");let block=true;try{const policyPath=path.join(process.cwd(),\".bosun\",\"guardrails.json\");if(fs.existsSync(policyPath)){const policy=JSON.parse(fs.readFileSync(policyPath,\"utf8\"));block=policy?.push?.blockAgentPushes!==false;}}catch{} if(block){console.error(\"BLOCKED: Direct agent pushes are disabled. 
Commit your changes and let Bosun workflow automation perform the validated push.\");process.exit(1);}' ; if ($LASTEXITCODE -ne 0) { exit $LASTEXITCODE } }"`, - ), - blocking: true, - timeout: 5_000, - sdks: ["*"], - core: false, - defaultEnabled: true, - retryable: false, - tags: ["safety", "git", "push", "workflow-only", "blocking"], - }, - { id: "safety-block-destructive-commands", name: "Block Destructive Commands", @@ -609,7 +590,7 @@ const BUILTIN_HOOKS = [ events: "SessionStart", command: shellCmd( `bash -c 'mkdir -p .bosun && echo "{\\"event\\":\\"session_start\\",\\"ts\\":$(date +%s),\\"sdk\\":\\"$VE_SDK\\",\\"task\\":\\"$VE_TASK_ID\\",\\"branch\\":\\"$VE_BRANCH_NAME\\"}" >> .bosun/audit.jsonl'`, - `powershell -NoProfile -Command "New-Item -ItemType Directory -Force -Path .bosun | Out-Null; @{event='session_start';ts=[int](Get-Date -UFormat %s);sdk=$env:VE_SDK;task=$env:VE_TASK_ID;branch=$env:VE_BRANCH_NAME} | ConvertTo-Json -Compress | Add-Content -Encoding UTF8 .bosun/audit.jsonl"`, + `powershell -NoProfile -Command "New-Item -ItemType Directory -Force -Path .bosun | Out-Null; @{event='session_start';ts=[int](Get-Date -UFormat %s);sdk=$env:VE_SDK;task=$env:VE_TASK_ID;branch=$env:VE_BRANCH_NAME} | ConvertTo-Json -Compress | Add-Content .bosun/audit.jsonl"`, ), blocking: false, timeout: 5_000, @@ -628,7 +609,7 @@ const BUILTIN_HOOKS = [ events: "SessionStop", command: shellCmd( `bash -c 'mkdir -p .bosun && echo "{\\"event\\":\\"session_stop\\",\\"ts\\":$(date +%s),\\"sdk\\":\\"$VE_SDK\\",\\"task\\":\\"$VE_TASK_ID\\"}" >> .bosun/audit.jsonl'`, - `powershell -NoProfile -Command "New-Item -ItemType Directory -Force -Path .bosun | Out-Null; @{event='session_stop';ts=[int](Get-Date -UFormat %s);sdk=$env:VE_SDK;task=$env:VE_TASK_ID} | ConvertTo-Json -Compress | Add-Content -Encoding UTF8 .bosun/audit.jsonl"`, + `powershell -NoProfile -Command "New-Item -ItemType Directory -Force -Path .bosun | Out-Null; @{event='session_stop';ts=[int](Get-Date -UFormat 
%s);sdk=$env:VE_SDK;task=$env:VE_TASK_ID} | ConvertTo-Json -Compress | Add-Content .bosun/audit.jsonl"`, ), blocking: false, timeout: 5_000, @@ -647,7 +628,7 @@ const BUILTIN_HOOKS = [ events: "PreToolUse", command: shellCmd( `bash -c 'if [ -f .bosun/session-heartbeat.json ]; then LAST=$(cat .bosun/session-heartbeat.json | grep -o "\\"ts\\":[0-9]*" | grep -o "[0-9]*"); NOW=$(date +%s); DIFF=$((NOW - LAST)); if [ "$DIFF" -gt 300 ]; then echo "{\\"event\\":\\"idle_detected\\",\\"idle_seconds\\":$DIFF,\\"ts\\":$NOW}" >> .bosun/audit.jsonl; echo "WARNING: Agent idle for \${DIFF}s" >&2; fi; fi'`, - `powershell -NoProfile -Command "if (Test-Path .bosun/session-heartbeat.json) { $j = Get-Content .bosun/session-heartbeat.json | ConvertFrom-Json; $diff = [int](Get-Date -UFormat %s) - $j.ts; if ($diff -gt 300) { @{event='idle_detected';idle_seconds=$diff;ts=[int](Get-Date -UFormat %s)} | ConvertTo-Json -Compress | Add-Content -Encoding UTF8 .bosun/audit.jsonl; Write-Warning \\"Agent idle for \${diff}s\\" } }"`, + `powershell -NoProfile -Command "if (Test-Path .bosun/session-heartbeat.json) { $j = Get-Content .bosun/session-heartbeat.json | ConvertFrom-Json; $diff = [int](Get-Date -UFormat %s) - $j.ts; if ($diff -gt 300) { @{event='idle_detected';idle_seconds=$diff;ts=[int](Get-Date -UFormat %s)} | ConvertTo-Json -Compress | Add-Content .bosun/audit.jsonl; Write-Warning \\"Agent idle for \${diff}s\\" } }"`, ), blocking: false, timeout: 5_000, @@ -666,7 +647,7 @@ const BUILTIN_HOOKS = [ events: ["SessionStart", "SessionStop"], command: shellCmd( `bash -c 'mkdir -p .bosun && git diff --stat HEAD > .bosun/git-status-snapshot.txt 2>/dev/null && echo "OK: git snapshot saved"'`, - `powershell -NoProfile -Command "New-Item -ItemType Directory -Force -Path .bosun | Out-Null; git diff --stat HEAD 2>$null | Set-Content -Encoding UTF8 .bosun/git-status-snapshot.txt; Write-Host 'OK: git snapshot saved'"`, + `powershell -NoProfile -Command "New-Item -ItemType Directory -Force -Path .bosun 
| Out-Null; git diff --stat HEAD 2>$null | Set-Content .bosun/git-status-snapshot.txt; Write-Host 'OK: git snapshot saved'"`, ), blocking: false, timeout: 10_000, @@ -685,7 +666,7 @@ const BUILTIN_HOOKS = [ events: "PostCommit", command: shellCmd( `bash -c 'mkdir -p .bosun; F=.bosun/session-metrics.json; if [ -f "$F" ]; then N=$(cat "$F" | grep -o "\\"commits\\":[0-9]*" | grep -o "[0-9]*" || echo 0); else N=0; fi; N=$((N + 1)); echo "{\\"commits\\":$N,\\"last_commit_ts\\":$(date +%s)}" > "$F"'`, - `powershell -NoProfile -Command "New-Item -ItemType Directory -Force -Path .bosun | Out-Null; $f = '.bosun/session-metrics.json'; $n = 0; if (Test-Path $f) { try { $n = (Get-Content $f | ConvertFrom-Json).commits } catch {} }; $n++; @{commits=$n;last_commit_ts=[int](Get-Date -UFormat %s)} | ConvertTo-Json | Set-Content -Encoding UTF8 $f"`, + `powershell -NoProfile -Command "New-Item -ItemType Directory -Force -Path .bosun | Out-Null; $f = '.bosun/session-metrics.json'; $n = 0; if (Test-Path $f) { try { $n = (Get-Content $f | ConvertFrom-Json).commits } catch {} }; $n++; @{commits=$n;last_commit_ts=[int](Get-Date -UFormat %s)} | ConvertTo-Json | Set-Content $f"`, ), blocking: false, timeout: 5_000, @@ -708,7 +689,7 @@ const BUILTIN_HOOKS = [ events: "PostToolUse", command: shellCmd( `bash -c 'mkdir -p .bosun && CMD="$VE_HOOK_COMMAND"; FILES=$(echo "$CMD" | grep -oE "[a-zA-Z0-9_./-]+\\.(js|mjs|ts|tsx|py|go|rs|java|rb|md|json|yaml|yml|toml)" | head -5 | tr "\\n" ","); if [ -n "$FILES" ]; then echo "{\\"ts\\":$(date +%s),\\"files\\":\\"$FILES\\",\\"tool\\":\\"$VE_HOOK_TOOL_NAME\\"}" >> .bosun/file-access.jsonl; fi'`, - `powershell -NoProfile -Command "New-Item -ItemType Directory -Force -Path .bosun | Out-Null; $files = [regex]::Matches($env:VE_HOOK_COMMAND, '[a-zA-Z0-9_.\\\\/-]+\\.(js|mjs|ts|tsx|py|go|rs|java|rb|md|json|yaml|yml|toml)') | Select-Object -First 5 -ExpandProperty Value; if ($files) { @{ts=[int](Get-Date -UFormat %s);files=($files -join 
',');tool=$env:VE_HOOK_TOOL_NAME} | ConvertTo-Json -Compress | Add-Content -Encoding UTF8 .bosun/file-access.jsonl }"`, + `powershell -NoProfile -Command "New-Item -ItemType Directory -Force -Path .bosun | Out-Null; $files = [regex]::Matches($env:VE_HOOK_COMMAND, '[a-zA-Z0-9_.\\\\/-]+\\.(js|mjs|ts|tsx|py|go|rs|java|rb|md|json|yaml|yml|toml)') | Select-Object -First 5 -ExpandProperty Value; if ($files) { @{ts=[int](Get-Date -UFormat %s);files=($files -join ',');tool=$env:VE_HOOK_TOOL_NAME} | ConvertTo-Json -Compress | Add-Content .bosun/file-access.jsonl }"`, ), blocking: false, timeout: 3_000, @@ -727,7 +708,7 @@ const BUILTIN_HOOKS = [ events: "SessionStart", command: shellCmd( `bash -c 'mkdir -p .bosun && echo "{\\"modified\\":$(git diff --name-only 2>/dev/null | wc -l),\\"staged\\":$(git diff --cached --name-only 2>/dev/null | wc -l),\\"recent_commits\\":[$(git log --oneline -5 --format="\\\"%h: %s\\\"" 2>/dev/null | tr "\\n" "," | sed "s/,$//")],\\"branch\\":\\"$(git rev-parse --abbrev-ref HEAD 2>/dev/null)\\"}" > .bosun/working-set.json'`, - `powershell -NoProfile -Command "New-Item -ItemType Directory -Force -Path .bosun | Out-Null; $mod = (git diff --name-only 2>$null | Measure-Object).Count; $staged = (git diff --cached --name-only 2>$null | Measure-Object).Count; $commits = git log --oneline -5 2>$null; $branch = git rev-parse --abbrev-ref HEAD 2>$null; @{modified=$mod;staged=$staged;recent_commits=@($commits);branch=$branch} | ConvertTo-Json | Set-Content -Encoding UTF8 .bosun/working-set.json"`, + `powershell -NoProfile -Command "New-Item -ItemType Directory -Force -Path .bosun | Out-Null; $mod = (git diff --name-only 2>$null | Measure-Object).Count; $staged = (git diff --cached --name-only 2>$null | Measure-Object).Count; $commits = git log --oneline -5 2>$null; $branch = git rev-parse --abbrev-ref HEAD 2>$null; @{modified=$mod;staged=$staged;recent_commits=@($commits);branch=$branch} | ConvertTo-Json | Set-Content .bosun/working-set.json"`, ), 
blocking: false, timeout: 10_000, @@ -950,7 +931,7 @@ const BUILTIN_HOOKS = [ events: "TaskComplete", command: shellCmd( `bash -c 'mkdir -p .bosun && AHEAD=$(git rev-list --count $(git merge-base HEAD origin/main 2>/dev/null || echo HEAD)..HEAD 2>/dev/null || echo 0) && echo "{\\"event\\":\\"task_complete\\",\\"task\\":\\"$VE_TASK_ID\\",\\"commits\\":$AHEAD,\\"ts\\":$(date +%s),\\"branch\\":\\"$VE_BRANCH_NAME\\"}" >> .bosun/completions.jsonl'`, - `powershell -NoProfile -Command "New-Item -ItemType Directory -Force -Path .bosun | Out-Null; $mb = git merge-base HEAD origin/main 2>$null; $ahead = if ($mb) { [int](git rev-list --count \"$mb..HEAD\" 2>$null) } else { 0 }; @{event='task_complete';task=$env:VE_TASK_ID;commits=$ahead;ts=[int](Get-Date -UFormat %s);branch=$env:VE_BRANCH_NAME} | ConvertTo-Json -Compress | Add-Content -Encoding UTF8 .bosun/completions.jsonl"`, + `powershell -NoProfile -Command "New-Item -ItemType Directory -Force -Path .bosun | Out-Null; $mb = git merge-base HEAD origin/main 2>$null; $ahead = if ($mb) { [int](git rev-list --count \"$mb..HEAD\" 2>$null) } else { 0 }; @{event='task_complete';task=$env:VE_TASK_ID;commits=$ahead;ts=[int](Get-Date -UFormat %s);branch=$env:VE_BRANCH_NAME} | ConvertTo-Json -Compress | Add-Content .bosun/completions.jsonl"`, ), blocking: false, timeout: 10_000, @@ -988,7 +969,7 @@ const BUILTIN_HOOKS = [ events: "PostPR", command: shellCmd( `bash -c 'mkdir -p .bosun && echo "{\\"event\\":\\"pr_created\\",\\"task\\":\\"$VE_TASK_ID\\",\\"branch\\":\\"$VE_BRANCH_NAME\\",\\"ts\\":$(date +%s)}" >> .bosun/audit.jsonl'`, - `powershell -NoProfile -Command "New-Item -ItemType Directory -Force -Path .bosun | Out-Null; @{event='pr_created';task=$env:VE_TASK_ID;branch=$env:VE_BRANCH_NAME;ts=[int](Get-Date -UFormat %s)} | ConvertTo-Json -Compress | Add-Content -Encoding UTF8 .bosun/audit.jsonl"`, + `powershell -NoProfile -Command "New-Item -ItemType Directory -Force -Path .bosun | Out-Null; 
@{event='pr_created';task=$env:VE_TASK_ID;branch=$env:VE_BRANCH_NAME;ts=[int](Get-Date -UFormat %s)} | ConvertTo-Json -Compress | Add-Content .bosun/audit.jsonl"`, ), blocking: false, timeout: 5_000, @@ -1011,7 +992,7 @@ const BUILTIN_HOOKS = [ events: ["SessionStart", "SessionStop"], command: shellCmd( `bash -c 'mkdir -p .bosun/events && echo "{\\"event\\":\\"agent.$VE_HOOK_EVENT\\",\\"sdk\\":\\"$VE_SDK\\",\\"task\\":\\"$VE_TASK_ID\\",\\"ts\\":$(date +%s)}" > .bosun/events/$(date +%s)-$VE_HOOK_EVENT.json'`, - `powershell -NoProfile -Command "New-Item -ItemType Directory -Force -Path .bosun/events | Out-Null; $ts = [int](Get-Date -UFormat %s); @{event=\"agent.$($env:VE_HOOK_EVENT)\";sdk=$env:VE_SDK;task=$env:VE_TASK_ID;ts=$ts} | ConvertTo-Json | Set-Content -Encoding UTF8 \".bosun/events/$ts-$($env:VE_HOOK_EVENT).json\""`, + `powershell -NoProfile -Command "New-Item -ItemType Directory -Force -Path .bosun/events | Out-Null; $ts = [int](Get-Date -UFormat %s); @{event=\"agent.$($env:VE_HOOK_EVENT)\";sdk=$env:VE_SDK;task=$env:VE_TASK_ID;ts=$ts} | ConvertTo-Json | Set-Content \".bosun/events/$ts-$($env:VE_HOOK_EVENT).json\""`, ), blocking: false, timeout: 5_000, @@ -1052,7 +1033,7 @@ const BUILTIN_HOOKS = [ events: ["SubagentStart", "SubagentStop"], command: shellCmd( `bash -c 'mkdir -p .bosun && echo "{\\"event\\":\\"$VE_HOOK_EVENT\\",\\"sdk\\":\\"$VE_SDK\\",\\"task\\":\\"$VE_TASK_ID\\",\\"ts\\":$(date +%s)}" >> .bosun/subagent-log.jsonl'`, - `powershell -NoProfile -Command "New-Item -ItemType Directory -Force -Path .bosun | Out-Null; @{event=$env:VE_HOOK_EVENT;sdk=$env:VE_SDK;task=$env:VE_TASK_ID;ts=[int](Get-Date -UFormat %s)} | ConvertTo-Json -Compress | Add-Content -Encoding UTF8 .bosun/subagent-log.jsonl"`, + `powershell -NoProfile -Command "New-Item -ItemType Directory -Force -Path .bosun | Out-Null; @{event=$env:VE_HOOK_EVENT;sdk=$env:VE_SDK;task=$env:VE_TASK_ID;ts=[int](Get-Date -UFormat %s)} | ConvertTo-Json -Compress | Add-Content 
.bosun/subagent-log.jsonl"`, ), blocking: false, timeout: 5_000, diff --git a/agent/hook-profiles.mjs b/agent/hook-profiles.mjs index e5c820588..d7bc8d6d7 100644 --- a/agent/hook-profiles.mjs +++ b/agent/hook-profiles.mjs @@ -1,7 +1,6 @@ import { existsSync, mkdirSync, readFileSync, writeFileSync } from "node:fs"; import { resolve, dirname, relative } from "node:path"; import { fileURLToPath } from "node:url"; -import { detectProjectStack } from "../workflow/project-detection.mjs"; const __filename = fileURLToPath(import.meta.url); const __dirname = dirname(__filename); @@ -9,7 +8,7 @@ const __dirname = dirname(__filename); const DEFAULT_TIMEOUT_MS = 60_000; const DEFAULT_HOOK_SCHEMA = "https://json-schema.org/draft/2020-12/schema"; const LEGACY_BRIDGE_SNIPPET = "scripts/bosun/agent-hook-bridge.mjs"; -const DEFAULT_BRIDGE_SCRIPT_PATH = "agent/agent-hook-bridge.mjs"; +const DEFAULT_BRIDGE_SCRIPT_PATH = resolve(__dirname, "agent-hook-bridge.mjs"); function getHookNodeBinary() { const configured = String(process.env.BOSUN_HOOK_NODE_BIN || "").trim(); @@ -33,28 +32,24 @@ export const HOOK_PROFILES = Object.freeze([ const PRESET_FLAGS = Object.freeze({ strict: { includeSessionHooks: true, - includePostToolUse: true, includePreCommit: true, includePrePush: true, includeTaskComplete: true, }, balanced: { includeSessionHooks: true, - includePostToolUse: true, includePreCommit: false, includePrePush: true, includeTaskComplete: true, }, lightweight: { includeSessionHooks: true, - includePostToolUse: false, includePreCommit: false, includePrePush: false, includeTaskComplete: false, }, none: { includeSessionHooks: false, - includePostToolUse: false, includePreCommit: false, includePrePush: false, includeTaskComplete: false, @@ -82,43 +77,38 @@ const PRESET_COMMANDS = Object.freeze({ timeout: 10_000, }, ]), - PostToolUse: Object.freeze([ + PrePush: Object.freeze([ { - id: "post-tool-use-validation", - command: "", - description: "Run lightweight repository validation after edit 
tools complete", - blocking: false, + id: "prepush-go-vet", + command: "go vet ./...", + description: "Run go vet before push", + blocking: true, timeout: 120_000, }, - ]), - TaskComplete: Object.freeze([ { - id: "task-complete-audit", - command: 'echo "[hook] Task completed: ${VE_TASK_ID} — ${VE_TASK_TITLE}"', - description: "Audit log for task completion", - blocking: false, - timeout: 10_000, + id: "prepush-go-build", + command: "go build ./...", + description: "Verify Go build succeeds before push", + blocking: true, + timeout: 300_000, }, ]), -}); - -const PORTABLE_VALIDATION_COMMANDS = Object.freeze({ - PrePush: Object.freeze([ + PreCommit: Object.freeze([ { - id: "prepush-git-diff-check", - command: "git diff --check", - description: "Check tracked changes for whitespace and conflict-marker issues before push", - blocking: true, + id: "precommit-gofmt", + command: "gofmt -l .", + description: "Check Go formatting before commit", + blocking: false, timeout: 30_000, }, ]), - PreCommit: Object.freeze([ + TaskComplete: Object.freeze([ { - id: "precommit-git-staged-diff-check", - command: "git diff --cached --check", - description: "Check staged changes for whitespace and conflict-marker issues before commit", + id: "task-complete-audit", + command: 'echo "[hook] Task completed: ${VE_TASK_ID} — ${VE_TASK_TITLE}"', + description: "Audit log for task completion", blocking: false, - timeout: 30_000, + timeout: 10_000, }, ]), }); @@ -163,8 +153,8 @@ function isPortableNodeCommandToken(token) { } function isPortableBridgeScriptToken(token) { - const raw = String(token || "").trim().replace(/\\/g, "/"); - return raw === DEFAULT_BRIDGE_SCRIPT_PATH || raw === `./${DEFAULT_BRIDGE_SCRIPT_PATH}`; + const raw = String(token || ""); + return raw === DEFAULT_BRIDGE_SCRIPT_PATH || raw === LEGACY_BRIDGE_SNIPPET; } function isCopilotBridgeCommandPortable(commandTokens) { @@ -180,98 +170,6 @@ function deepClone(value) { return JSON.parse(JSON.stringify(value)); } -function 
slugifyHookId(value) { - return String(value || "") - .trim() - .toLowerCase() - .replace(/[^a-z0-9]+/g, "-") - .replace(/^-+|-+$/g, "") || "hook"; -} - -function createHookEntry({ - id, - command, - description, - blocking = false, - timeout = DEFAULT_TIMEOUT_MS, -}) { - return { - id, - command, - description, - blocking, - timeout, - sdks: ["*"], - }; -} - -function buildDetectedValidationCommands(rootDir) { - const detected = detectProjectStack(rootDir); - const qualityGate = String(detected?.commands?.qualityGate || "").trim(); - const postEdit = String(detected?.commands?.postEdit || "").trim(); - const lint = String(detected?.commands?.lint || "").trim(); - const syntaxCheck = String(detected?.commands?.syntaxCheck || "").trim(); - - return { - PostToolUse: (postEdit || lint || syntaxCheck) - ? [ - createHookEntry({ - id: `posttooluse-detected-${slugifyHookId(postEdit || lint || syntaxCheck)}`, - command: postEdit || lint || syntaxCheck, - description: `Run lightweight repository validation after edits (${detected?.primary?.label || "project"})`, - blocking: false, - timeout: 120_000, - }), - ] - : [], - PrePush: qualityGate - ? [ - createHookEntry({ - id: `prepush-detected-${slugifyHookId(qualityGate)}`, - command: qualityGate, - description: `Run detected repository quality gate before push (${detected?.primary?.label || "project"})`, - blocking: true, - timeout: 300_000, - }), - ] - : [], - PreCommit: (lint || syntaxCheck) - ? 
[ - createHookEntry({ - id: `precommit-detected-${slugifyHookId(lint || syntaxCheck)}`, - command: lint || syntaxCheck, - description: `Run detected repository validation before commit (${detected?.primary?.label || "project"})`, - blocking: false, - timeout: 120_000, - }), - ] - : [], - }; -} - -function buildDefaultHooksForEvent(event, rootDir) { - const defaults = [ - ...deepClone(PORTABLE_VALIDATION_COMMANDS[event] || []), - ]; - - if (rootDir && existsSync(rootDir)) { - defaults.push(...buildDetectedValidationCommands(rootDir)[event]); - } - - const seen = new Set(); - return defaults - .filter((entry) => { - const command = String(entry?.command || "").trim(); - if (!command || seen.has(command)) return false; - seen.add(command); - return true; - }) - .map((entry) => ({ - ...entry, - sdks: ["*"], - })); -} - function normalizeProfile(profile) { const raw = String(profile || "") .trim() @@ -358,9 +256,6 @@ export function buildHookScaffoldOptionsFromEnv(env = process.env) { SessionStop: normalizeOverrideCommands( env.BOSUN_HOOK_SESSION_STOP, ), - PostToolUse: normalizeOverrideCommands( - env.BOSUN_HOOK_POST_TOOL_USE ?? env.BOSUN_HOOK_POSTEDIT, - ), PrePush: normalizeOverrideCommands(env.BOSUN_HOOK_PREPUSH), PreCommit: normalizeOverrideCommands(env.BOSUN_HOOK_PRECOMMIT), TaskComplete: normalizeOverrideCommands( @@ -374,7 +269,6 @@ export function buildCanonicalHookConfig(options = {}) { const profile = normalizeProfile(options.profile); const flags = { ...PRESET_FLAGS[profile] }; const commandOverrides = options.commands || {}; - const rootDir = options.repoRoot ? 
resolve(options.repoRoot) : ""; const hooks = {}; @@ -390,14 +284,17 @@ export function buildCanonicalHookConfig(options = {}) { sdks: ["*"], })); } - if (flags.includePostToolUse) { - hooks.PostToolUse = buildDefaultHooksForEvent("PostToolUse", rootDir); - } if (flags.includePrePush) { - hooks.PrePush = buildDefaultHooksForEvent("PrePush", rootDir); + hooks.PrePush = deepClone(PRESET_COMMANDS.PrePush).map((item) => ({ + ...item, + sdks: ["*"], + })); } if (flags.includePreCommit) { - hooks.PreCommit = buildDefaultHooksForEvent("PreCommit", rootDir); + hooks.PreCommit = deepClone(PRESET_COMMANDS.PreCommit).map((item) => ({ + ...item, + sdks: ["*"], + })); } if (flags.includeTaskComplete) { hooks.TaskComplete = deepClone(PRESET_COMMANDS.TaskComplete).map( @@ -411,7 +308,6 @@ export function buildCanonicalHookConfig(options = {}) { for (const event of [ "SessionStart", "SessionStop", - "PostToolUse", "PrePush", "PreCommit", "TaskComplete", @@ -687,10 +583,7 @@ export function scaffoldAgentHookFiles(repoRoot, options = {}) { return result; } - const codexHookConfig = buildCanonicalHookConfig({ - ...options, - repoRoot: root, - }); + const codexHookConfig = buildCanonicalHookConfig(options); result.env = buildDisableEnv(codexHookConfig); if (targets.includes("codex")) { @@ -764,10 +657,10 @@ export function scaffoldAgentHookFiles(repoRoot, options = {}) { const geminiPath = resolve(root, ".gemini", "settings.json"); const geminiConfig = { hooks: { - SessionStart: [{ command: buildShellCommand(makeBridgeCommandTokens("gemini", "SessionStart")) }], - SessionStop: [{ command: buildShellCommand(makeBridgeCommandTokens("gemini", "SessionStop")) }], - PreToolUse: [{ command: buildShellCommand(makeBridgeCommandTokens("gemini", "PreToolUse")) }], - PostToolUse: [{ command: buildShellCommand(makeBridgeCommandTokens("gemini", "PostToolUse")) }], + SessionStart: [{ command: "node agent-hook-bridge.mjs --agent gemini --event SessionStart" }], + SessionStop: [{ command: "node 
agent-hook-bridge.mjs --agent gemini --event SessionStop" }], + PreToolUse: [{ command: "node agent-hook-bridge.mjs --agent gemini --event PreToolUse" }], + PostToolUse: [{ command: "node agent-hook-bridge.mjs --agent gemini --event PostToolUse" }], }, _bosun: { managed: true, profile: result.profile, generated: new Date().toISOString() }, }; @@ -789,11 +682,11 @@ export function scaffoldAgentHookFiles(repoRoot, options = {}) { const opencodePath = resolve(root, ".opencode", "hooks.json"); const opencodeConfig = { hooks: { - SessionStart: [{ command: buildShellCommand(makeBridgeCommandTokens("opencode", "SessionStart")) }], - SessionStop: [{ command: buildShellCommand(makeBridgeCommandTokens("opencode", "SessionStop")) }], - PreToolUse: [{ command: buildShellCommand(makeBridgeCommandTokens("opencode", "PreToolUse")) }], - PostToolUse: [{ command: buildShellCommand(makeBridgeCommandTokens("opencode", "PostToolUse")) }], - TaskComplete: [{ command: buildShellCommand(makeBridgeCommandTokens("opencode", "TaskComplete")) }], + SessionStart: [{ command: "node agent-hook-bridge.mjs --agent opencode --event SessionStart" }], + SessionStop: [{ command: "node agent-hook-bridge.mjs --agent opencode --event SessionStop" }], + PreToolUse: [{ command: "node agent-hook-bridge.mjs --agent opencode --event PreToolUse" }], + PostToolUse: [{ command: "node agent-hook-bridge.mjs --agent opencode --event PostToolUse" }], + TaskComplete: [{ command: "node agent-hook-bridge.mjs --agent opencode --event TaskComplete" }], }, _bosun: { managed: true, profile: result.profile, generated: new Date().toISOString() }, }; diff --git a/agent/primary-agent.mjs b/agent/primary-agent.mjs index 335a7282b..e0afdbe56 100644 --- a/agent/primary-agent.mjs +++ b/agent/primary-agent.mjs @@ -10,7 +10,7 @@ import { ensureCodexConfig, printConfigSummary } from "../shell/codex-config.mjs import { ensureRepoConfigs, printRepoConfigSummary } from "../config/repo-config.mjs"; import { resolveRepoRoot } from 
"../config/repo-root.mjs"; import { buildArchitectEditorFrame } from "../lib/repo-map.mjs"; -import { getAgentToolConfig, getEffectiveTools, refreshToolOverheadReport } from "./agent-tool-config.mjs"; +import { getAgentToolConfig, getEffectiveTools } from "./agent-tool-config.mjs"; import { getSessionTracker } from "../infra/session-tracker.mjs"; import { buildContextEnvelope } from "../workspace/context-cache.mjs"; import { getEntry, getEntryContent, resolveAgentProfileLibraryMetadata } from "../infra/library-manager.mjs"; @@ -69,25 +69,6 @@ import { } from "../shell/gemini-shell.mjs"; import { getModelsForExecutor, normalizeExecutorKey } from "../task/task-complexity.mjs"; -const toolOverheadRefreshCache = new Map(); - -function scheduleToolOverheadRefresh(rootDir, agentProfileId, enabledMcpServers = []) { - if (!agentProfileId) return; - const normalizedServerIds = Array.from(new Set( - enabledMcpServers.map((id) => String(id || "").trim()).filter(Boolean), - )).sort(); - const cacheKey = [rootDir, agentProfileId].join("::"); - const signature = JSON.stringify(normalizedServerIds); - const cached = toolOverheadRefreshCache.get(cacheKey); - if (cached?.signature === signature) return; - const promise = refreshToolOverheadReport(rootDir, agentProfileId, { serverIds: normalizedServerIds }) - .catch((error) => { - toolOverheadRefreshCache.delete(cacheKey); - console.warn("[primary-agent] failed to refresh tool overhead report:", error?.message || error); - }); - toolOverheadRefreshCache.set(cacheKey, { signature, promise }); -} - /** Valid agent interaction modes */ const CORE_MODES = ["ask", "agent", "plan", "web", "instant"]; /** Custom modes loaded from library */ @@ -253,9 +234,6 @@ function buildPrimaryToolCapabilityContract(options = {}) { const enabledMcpServers = Array.isArray(rawCfg?.enabledMcpServers) ? 
rawCfg.enabledMcpServers.map((id) => String(id || "").trim()).filter(Boolean) : []; - if (agentProfileId) { - scheduleToolOverheadRefresh(rootDir, agentProfileId, enabledMcpServers); - } const manifest = { agentProfileId: agentProfileId || null, enabledBuiltinTools, @@ -1040,17 +1018,6 @@ export async function execPrimaryPrompt(userMessage, options = {}) { : userMessage; const architectEditorFrame = buildArchitectEditorFrame(options, effectiveMode); const toolContract = buildPrimaryToolCapabilityContract(options); - const selectedAgentToolConfig = options.agentProfileId - ? getAgentToolConfig(rootDir, options.agentProfileId) - : null; - const selectedMcpServers = Array.isArray(selectedAgentToolConfig?.enabledMcpServers) - ? selectedAgentToolConfig.enabledMcpServers - .map((id) => String(id || "").trim()) - .filter(Boolean) - : []; - const selectedMcpServerSelection = selectedAgentToolConfig - ? selectedMcpServers - : undefined; const messageWithToolContract = [selectedProfile.block, architectEditorFrame, toolContract, messageWithAttachments] .filter(Boolean) .join("\n\n"); @@ -1074,7 +1041,6 @@ export async function execPrimaryPrompt(userMessage, options = {}) { cwd: options.cwd, model: effectiveModel, sdk: mapAdapterToPoolSdk(activeAdapter.name), - mcpServers: selectedMcpServerSelection, sessionType, }); const pooledText = @@ -1163,13 +1129,7 @@ export async function execPrimaryPrompt(userMessage, options = {}) { } } const result = await withTimeout( - adapter.exec(framedMessage, { - ...options, - sessionId, - model: effectiveModel, - abortController: timeoutAbort, - mcpServers: selectedMcpServerSelection, - }), + adapter.exec(framedMessage, { ...options, sessionId, model: effectiveModel, abortController: timeoutAbort }), timeoutMs, `${adapterName}.exec`, timeoutAbort, @@ -1238,13 +1198,7 @@ export async function execPrimaryPrompt(userMessage, options = {}) { } } const retryResult = await withTimeout( - adapter.exec(framedMessage, { - ...options, - sessionId, - 
model: effectiveModel, - abortController: timeoutAbort, - mcpServers: selectedMcpServerSelection, - }), + adapter.exec(framedMessage, { ...options, sessionId, model: effectiveModel, abortController: timeoutAbort }), timeoutMs, `${adapterName}.exec.retry`, timeoutAbort, @@ -1553,3 +1507,5 @@ export async function execSdkCommand(command, args = "", adapterName, options = } + + diff --git a/agent/review-agent.mjs b/agent/review-agent.mjs index 6104ac605..fb4a37fb2 100644 --- a/agent/review-agent.mjs +++ b/agent/review-agent.mjs @@ -24,44 +24,6 @@ const DEFAULT_REVIEW_TIMEOUT_MS = 5 * 60 * 1000; /** Default max concurrent reviews. */ const DEFAULT_MAX_CONCURRENT = 2; -function normalizeReviewDedupFragment(value) { - return String(value || "") - .trim() - .replaceAll(/\s+/g, " ") - .slice(0, 240); -} - -function buildReviewNotificationDedupKey(taskId, result) { - const issues = Array.isArray(result?.issues) ? result.issues : []; - const issueFingerprint = issues - .map((issue) => ({ - severity: normalizeReviewDedupFragment(issue?.severity), - category: normalizeReviewDedupFragment(issue?.category), - file: normalizeReviewDedupFragment(issue?.file), - line: Number.isFinite(Number(issue?.line)) ? Number(issue.line) : "", - })) - .sort((left, right) => - `${left.file}:${left.line}:${left.category}:${left.severity}`.localeCompare( - `${right.file}:${right.line}:${right.category}:${right.severity}`, - ), - ) - .map((issue) => - [ - issue.severity, - issue.category, - issue.file, - issue.line, - ].join(":"), - ) - .join("|"); - return [ - "review", - normalizeReviewDedupFragment(taskId), - result?.approved ? 
"approved" : "changes_requested", - issueFingerprint, - ].join("|"); -} - // --------------------------------------------------------------------------- // Review Prompt // --------------------------------------------------------------------------- @@ -664,23 +626,7 @@ export class ReviewAgent { .join("\n"); try { - this.#sendTelegram(message, { - dedupKey: buildReviewNotificationDedupKey(taskId, result), - exactDedup: true, - }); - } catch { - /* best effort */ - } - } - - // Send Telegram for approved reviews - if (result.approved && typeof this.#sendTelegram === "function") { - const message = `:check: Review: approved\nTask: ${taskId}\nSummary: ${result.summary || "No critical issues found"}`; - try { - this.#sendTelegram(message, { - dedupKey: buildReviewNotificationDedupKey(taskId, result), - exactDedup: true, - }); + this.#sendTelegram(message); } catch { /* best effort */ } diff --git a/bench/eval-framework.mjs b/bench/eval-framework.mjs deleted file mode 100644 index d2c314576..000000000 --- a/bench/eval-framework.mjs +++ /dev/null @@ -1,783 +0,0 @@ -import { mkdirSync, readFileSync, writeFileSync, readdirSync, existsSync } from "node:fs"; -import { basename, resolve } from "node:path"; -import { randomUUID } from "node:crypto"; - -const TASK_TYPES = Object.freeze([ - "code-generation", - "bug-fix", - "refactor", - "test-writing", - "code-review", -]); - -const BUILTIN_TASK_METRICS = Object.freeze({ - "code-generation": ["TaskSuccess", "TokenEfficiency", "TimeToComplete", "ContextUtilization"], - "bug-fix": ["TaskSuccess", "TokenEfficiency", "TimeToComplete", "TestPassRate"], - refactor: ["TaskSuccess", "TokenEfficiency", "TimeToComplete", "FalsePositiveRate"], - "test-writing": ["TaskSuccess", "TokenEfficiency", "TimeToComplete", "TestPassRate"], - "code-review": ["TaskSuccess", "TokenEfficiency", "TimeToComplete", "FalsePositiveRate"], -}); - -const BUILTIN_METRICS = Object.freeze([ - "TaskSuccess", - "TokenEfficiency", - "TimeToComplete", - 
"TestPassRate", - "FalsePositiveRate", - "ContextUtilization", -]); - -const DEFAULT_RESULTS_DIR = ".cache/eval-results"; -const DEFAULT_BENCHMARKS_DIR = "bench/benchmarks"; - -function mean(values = []) { - if (!Array.isArray(values) || values.length === 0) return 0; - return values.reduce((sum, value) => sum + Number(value || 0), 0) / values.length; -} - -function percentile(values = [], p = 95) { - if (!Array.isArray(values) || values.length === 0) return 0; - const sorted = values.map((value) => Number(value || 0)).sort((a, b) => a - b); - const index = Math.min(sorted.length - 1, Math.max(0, Math.ceil((p / 100) * sorted.length) - 1)); - return sorted[index] ?? 0; -} - -function ensureArray(value) { - return Array.isArray(value) ? value : value == null ? [] : [value]; -} - -function toNumber(value, fallback = 0) { - const numeric = Number(value); - return Number.isFinite(numeric) ? numeric : fallback; -} - -function normalizeTaskType(type) { - const normalized = String(type || "").trim().toLowerCase(); - if (TASK_TYPES.includes(normalized)) return normalized; - return "code-generation"; -} - -function defaultMetricsForTask(type) { - return [...(BUILTIN_TASK_METRICS[normalizeTaskType(type)] || BUILTIN_METRICS)]; -} - -function parseJsonFile(filePath) { - return JSON.parse(readFileSync(resolve(filePath), "utf8")); -} - -function dedupeStrings(values = []) { - const output = []; - for (const value of ensureArray(values)) { - const normalized = String(value || "").trim(); - if (!normalized || output.includes(normalized)) continue; - output.push(normalized); - } - return output; -} - -function escapeRegex(text) { - return String(text || "").replace(/[.*+?^${}()|[\]\\]/g, "\\$&"); -} - -function normalCdf(value) { - const x = Number(value || 0); - const sign = x < 0 ? 
-1 : 1; - const abs = Math.abs(x) / Math.sqrt(2); - const t = 1 / (1 + 0.3275911 * abs); - const a1 = 0.254829592; - const a2 = -0.284496736; - const a3 = 1.421413741; - const a4 = -1.453152027; - const a5 = 1.061405429; - const erf = 1 - (((((a5 * t) + a4) * t + a3) * t + a2) * t + a1) * t * Math.exp(-(abs * abs)); - return 0.5 * (1 + sign * erf); -} - -function zScoreToPValue(score) { - const normalized = Math.abs(Number(score || 0)); - return Math.max(0, Math.min(1, 2 * (1 - normalCdf(normalized)))); -} - -function computeSignificance(baseValues = [], candidateValues = []) { - const baseline = ensureArray(baseValues).map((value) => toNumber(value)).filter((value) => Number.isFinite(value)); - const candidate = ensureArray(candidateValues).map((value) => toNumber(value)).filter((value) => Number.isFinite(value)); - if (baseline.length === 0 || candidate.length === 0) { - return { score: 0, pValue: 1, method: "insufficient-data" }; - } - const baselineMean = mean(baseline); - const candidateMean = mean(candidate); - const baselineVariance = mean(baseline.map((value) => (value - baselineMean) ** 2)); - const candidateVariance = mean(candidate.map((value) => (value - candidateMean) ** 2)); - const denominator = Math.sqrt( - (baselineVariance / Math.max(1, baseline.length)) + - (candidateVariance / Math.max(1, candidate.length)), - ); - if (!Number.isFinite(denominator) || denominator === 0) { - return { - score: candidateMean === baselineMean ? 0 : Number.POSITIVE_INFINITY, - pValue: candidateMean === baselineMean ? 
1 : 0, - method: "welch-z", - }; - } - const score = (candidateMean - baselineMean) / denominator; - return { - score, - pValue: zScoreToPValue(score), - method: "welch-z", - }; -} - -function normalizeMetricName(value) { - return String(value || "").trim(); -} - -function calculateCostFromOutcome(outcome = {}, strategy = {}) { - const totalTokens = toNumber(outcome.totalTokens, toNumber(outcome.tokensInput) + toNumber(outcome.tokensOutput)); - const rate = toNumber( - strategy.costPerMillionTokens ?? strategy.costRatePerMillion ?? strategy.tokenCostPerMillion, - 0, - ); - if (!rate || totalTokens <= 0) return 0; - return (totalTokens / 1_000_000) * rate; -} - -export class Task { - constructor(raw = {}) { - this.id = String(raw.id || "").trim() || `task-${randomUUID()}`; - this.type = normalizeTaskType(raw.type); - this.input = raw.input && typeof raw.input === "object" ? { ...raw.input } : { prompt: "", repoState: {} }; - this.groundTruth = raw.groundTruth && typeof raw.groundTruth === "object" ? { ...raw.groundTruth } : {}; - this.metrics = ensureArray(raw.metrics).length > 0 - ? dedupeStrings(ensureArray(raw.metrics).map((metric) => normalizeMetricName(metric))) - : defaultMetricsForTask(this.type); - this.tags = raw.tags && typeof raw.tags === "object" && !Array.isArray(raw.tags) ? { ...raw.tags } : {}; - } -} - -export class Benchmark { - constructor(raw = {}) { - this.id = String(raw.id || raw.name || "benchmark").trim() || "benchmark"; - this.name = String(raw.name || raw.id || "benchmark").trim() || "benchmark"; - this.description = String(raw.description || "").trim(); - this.tasks = ensureArray(raw.tasks).map((task) => task instanceof Task ? task : new Task(task)); - this.tags = raw.tags && typeof raw.tags === "object" && !Array.isArray(raw.tags) ? 
{ ...raw.tags } : {}; - this.sourcePath = String(raw.sourcePath || "").trim(); - } -} - -export class Metric { - constructor(name, evaluator, options = {}) { - this.name = normalizeMetricName(name); - this.evaluator = typeof evaluator === "function" ? evaluator : (() => null); - this.kind = String(options.kind || "metric"); - this.description = String(options.description || "").trim(); - } - - evaluate(context) { - return this.evaluator(context); - } -} - -export class CategoryMetric extends Metric { - constructor(name, evaluator, options = {}) { - super(name, evaluator, { ...options, kind: "category" }); - } -} - -export class NumericalMetric extends Metric { - constructor(name, evaluator, options = {}) { - super(name, evaluator, { ...options, kind: "numerical" }); - } -} - -function defaultRunner() { - return async () => ({ - success: false, - durationMs: 0, - tokensInput: 0, - tokensOutput: 0, - filesChanged: 0, - testsPassed: 0, - testsTotal: 0, - falsePositives: 0, - contextBytes: 0, - contextBudgetBytes: 0, - costUsd: 0, - }); -} - -export function builtInMetricInstances() { - return [ - new CategoryMetric("TaskSuccess", ({ outcome }) => outcome.success ? "pass" : "fail"), - new NumericalMetric("TokenEfficiency", ({ outcome }) => { - const totalTokens = toNumber(outcome.totalTokens, toNumber(outcome.tokensInput) + toNumber(outcome.tokensOutput)); - return totalTokens / Math.max(1, toNumber(outcome.filesChanged, 1)); - }), - new NumericalMetric("TimeToComplete", ({ outcome }) => toNumber(outcome.durationMs)), - new NumericalMetric("TestPassRate", ({ outcome }) => { - const total = Math.max(0, toNumber(outcome.testsTotal)); - if (total === 0) return outcome.success ? 
1 : 0; - return toNumber(outcome.testsPassed) / total; - }), - new NumericalMetric("FalsePositiveRate", ({ outcome }) => { - const reviewedCount = Math.max(1, toNumber(outcome.findingsTotal, toNumber(outcome.filesChanged, 1))); - return toNumber(outcome.falsePositives) / reviewedCount; - }), - new NumericalMetric("ContextUtilization", ({ outcome }) => { - const budget = Math.max(0, toNumber(outcome.contextBudgetBytes)); - if (budget === 0) return 0; - return toNumber(outcome.contextBytes) / budget; - }), - ]; -} - -function mapMetrics(metrics = []) { - const metricMap = new Map(); - for (const metric of [...builtInMetricInstances(), ...metrics]) { - if (!metric?.name) continue; - metricMap.set(metric.name, metric); - } - return metricMap; -} - -function buildTaskSummaryEntries(results = []) { - const byTask = new Map(); - for (const result of ensureArray(results)) { - const taskId = String(result?.taskId || "").trim(); - if (!taskId) continue; - if (!byTask.has(taskId)) { - byTask.set(taskId, { - taskId, - taskType: result.taskType || "code-generation", - repeats: 0, - passCount: 0, - tokenValues: [], - timeValues: [], - costValues: [], - resultIndexes: [], - }); - } - const entry = byTask.get(taskId); - entry.repeats += 1; - if (result.metrics?.TaskSuccess === "pass") entry.passCount += 1; - entry.tokenValues.push(toNumber(result.outcome?.totalTokens, toNumber(result.outcome?.tokensInput) + toNumber(result.outcome?.tokensOutput))); - entry.timeValues.push(toNumber(result.outcome?.durationMs)); - entry.costValues.push(toNumber(result.outcome?.costUsd)); - entry.resultIndexes.push(result.resultIndex); - } - return [...byTask.values()].map((entry) => ({ - taskId: entry.taskId, - taskType: entry.taskType, - repeats: entry.repeats, - passRate: entry.repeats > 0 ? 
entry.passCount / entry.repeats : 0, - avgTokens: mean(entry.tokenValues), - avgTimeMs: mean(entry.timeValues), - avgCostUsd: mean(entry.costValues), - })); -} - -function summarizeRun(run) { - const results = ensureArray(run.results); - const timeValues = []; - const tokenValues = []; - const costValues = []; - let passCount = 0; - for (const result of results) { - const metrics = result.metrics || {}; - const totalTokens = toNumber(result.outcome?.totalTokens, toNumber(result.outcome?.tokensInput) + toNumber(result.outcome?.tokensOutput)); - const totalCost = toNumber(result.outcome?.costUsd); - if (metrics.TaskSuccess === "pass") passCount += 1; - timeValues.push(toNumber(metrics.TimeToComplete, result.outcome?.durationMs)); - tokenValues.push(totalTokens); - costValues.push(totalCost); - } - return { - totalTasks: results.length, - passRate: results.length > 0 ? passCount / results.length : 0, - avgTokens: mean(tokenValues), - p95Tokens: percentile(tokenValues, 95), - avgTimeMs: mean(timeValues), - p95TimeMs: percentile(timeValues, 95), - totalCostUsd: costValues.reduce((sum, value) => sum + value, 0), - avgCostUsd: mean(costValues), - perTask: buildTaskSummaryEntries(results), - }; -} - -function normalizeStrategy(raw = {}, index = 0) { - const strategy = raw && typeof raw === "object" ? { ...raw } : { id: String(raw || "") }; - const id = String(strategy.id || strategy.name || `strategy-${index + 1}`).trim() || `strategy-${index + 1}`; - return { - id, - label: String(strategy.label || strategy.name || id).trim() || id, - sdk: String(strategy.sdk || "").trim(), - model: String(strategy.model || "").trim(), - promptStrategy: String(strategy.promptStrategy || strategy.prompt || "").trim(), - codebaseProfile: String(strategy.codebaseProfile || strategy.repoProfile || "").trim(), - annotated: strategy.annotated === true, - unannotated: strategy.unannotated === true, - config: strategy.config && typeof strategy.config === "object" ? 
{ ...strategy.config } : {}, - costPerMillionTokens: toNumber(strategy.costPerMillionTokens ?? strategy.costRatePerMillion, 0), - metadata: strategy.metadata && typeof strategy.metadata === "object" ? { ...strategy.metadata } : {}, - }; -} - -function loadRunFromFile(runPath) { - return parseJsonFile(runPath); -} - -function readRunById(resultsDir, runId) { - return loadRunFromFile(resolveEvalResultPath(resultsDir, runId)); -} - -export class Evaluator { - constructor(options = {}) { - this.resultsDir = resolve(options.resultsDir || DEFAULT_RESULTS_DIR); - this.metrics = ensureArray(options.metrics); - this.runner = options.runner || defaultRunner(); - this.parallelism = Math.max(1, toNumber(options.parallelism, 1)); - this.storageAdapter = options.storageAdapter || null; - } - - async evaluate({ benchmark, repeats = 1, strategies = [] } = {}) { - const normalizedBenchmark = benchmark instanceof Benchmark ? benchmark : new Benchmark(benchmark || {}); - const normalizedStrategies = ensureArray(strategies).length > 0 - ? 
ensureArray(strategies).map((strategy, index) => normalizeStrategy(strategy, index)) - : [normalizeStrategy({ id: "default", label: "Default" })]; - const metricMap = mapMetrics(this.metrics); - const runId = `eval-${Date.now()}-${randomUUID()}`; - const results = []; - let resultIndex = 0; - - for (const strategy of normalizedStrategies) { - for (const task of normalizedBenchmark.tasks) { - for (let repeatIndex = 0; repeatIndex < Math.max(1, repeats); repeatIndex += 1) { - const outcome = { - ...(await this.runner({ benchmark: normalizedBenchmark, task, strategy, repeatIndex })), - }; - outcome.totalTokens = toNumber( - outcome.totalTokens, - toNumber(outcome.tokensInput) + toNumber(outcome.tokensOutput), - ); - outcome.costUsd = toNumber( - outcome.costUsd, - calculateCostFromOutcome(outcome, strategy), - ); - const metricResults = {}; - for (const metricName of task.metrics) { - const metric = metricMap.get(metricName); - if (!metric) continue; - metricResults[metricName] = metric.evaluate({ - benchmark: normalizedBenchmark, - task, - strategy, - repeatIndex, - outcome, - }); - } - results.push({ - resultIndex: resultIndex++, - strategyId: strategy.id, - strategyLabel: strategy.label, - strategy, - taskId: task.id, - taskType: task.type, - repeatIndex, - metrics: metricResults, - outcome, - }); - } - } - } - - const summary = summarizeRun({ results }); - const run = { - runId, - benchmarkId: normalizedBenchmark.id, - benchmark: normalizedBenchmark.name, - benchmarkDescription: normalizedBenchmark.description, - repeats: Math.max(1, repeats), - strategyIds: normalizedStrategies.map((strategy) => strategy.id), - strategies: normalizedStrategies, - parallelism: this.parallelism, - createdAt: new Date().toISOString(), - results, - summary, - }; - - mkdirSync(this.resultsDir, { recursive: true }); - const resultPath = resolve(this.resultsDir, `${runId}.json`); - writeFileSync(resultPath, JSON.stringify(run, null, 2) + "\n", "utf8"); - if (this.storageAdapter && 
typeof this.storageAdapter.writeRun === "function") { - await this.storageAdapter.writeRun(run, resultPath); - } - return { ...run, resultPath }; - } -} - -export async function importBenchmarkFromFile(filePath) { - const raw = parseJsonFile(filePath); - return new Benchmark({ ...raw, sourcePath: resolve(filePath) }); -} - -export function compareEvaluationRuns(baseline, candidate) { - const metricKeys = ["passRate", "avgTokens", "p95Tokens", "avgTimeMs", "p95TimeMs", "avgCostUsd", "totalCostUsd"]; - const metricDeltas = {}; - for (const key of metricKeys) { - const baseValue = toNumber(baseline?.summary?.[key]); - const candidateValue = toNumber(candidate?.summary?.[key]); - const delta = candidateValue - baseValue; - const baselineTaskValues = ensureArray(baseline?.summary?.perTask).map((entry) => toNumber(entry?.[key])); - const candidateTaskValues = ensureArray(candidate?.summary?.perTask).map((entry) => toNumber(entry?.[key])); - metricDeltas[key] = { - baseline: baseValue, - candidate: candidateValue, - delta, - significance: computeSignificance(baselineTaskValues, candidateTaskValues), - }; - } - - const baselineMap = new Map(ensureArray(baseline?.summary?.perTask).map((entry) => [entry.taskId, entry])); - const candidateMap = new Map(ensureArray(candidate?.summary?.perTask).map((entry) => [entry.taskId, entry])); - const improved = []; - const regressed = []; - const unchanged = []; - - for (const [taskId, baselineResult] of baselineMap.entries()) { - const candidateResult = candidateMap.get(taskId); - if (!candidateResult) continue; - const passDelta = toNumber(candidateResult.passRate) - toNumber(baselineResult.passRate); - const tokenDelta = toNumber(candidateResult.avgTokens) - toNumber(baselineResult.avgTokens); - const timeDelta = toNumber(candidateResult.avgTimeMs) - toNumber(baselineResult.avgTimeMs); - const record = { - taskId, - baseline: baselineResult, - candidate: candidateResult, - passRateDelta: passDelta, - avgTokensDelta: tokenDelta, - 
avgTimeMsDelta: timeDelta, - }; - if (passDelta > 0) improved.push(record); - else if (passDelta < 0) regressed.push(record); - else unchanged.push(record); - } - - return { - baselineRunId: baseline?.runId || "", - candidateRunId: candidate?.runId || "", - metricDeltas, - perTask: { improved, regressed, unchanged }, - }; -} - -export function summarizeMatrix(runs = []) { - const rows = ensureArray(runs).map((run) => ({ - config: ensureArray(run.strategyIds)[0] || run.strategyId || "default", - passRate: toNumber(run.summary?.passRate), - avgTokens: toNumber(run.summary?.avgTokens), - p95Tokens: toNumber(run.summary?.p95Tokens), - avgTimeMs: toNumber(run.summary?.avgTimeMs), - p95TimeMs: toNumber(run.summary?.p95TimeMs), - cost: toNumber(run.summary?.totalCostUsd, toNumber(run.summary?.avgCostUsd)), - })); - return { rows }; -} - -export function compareAuditImpactRuns(withAnnotations, withoutAnnotations) { - const comparison = compareEvaluationRuns(withoutAnnotations, withAnnotations); - const rows = [ - ["Pass Rate", withAnnotations?.summary?.passRate, withoutAnnotations?.summary?.passRate], - ["Avg Tokens", withAnnotations?.summary?.avgTokens, withoutAnnotations?.summary?.avgTokens], - ["Avg Time (ms)", withAnnotations?.summary?.avgTimeMs, withoutAnnotations?.summary?.avgTimeMs], - ["False Positive Rate", mean(ensureArray(withAnnotations?.results).map((entry) => toNumber(entry?.metrics?.FalsePositiveRate))), mean(ensureArray(withoutAnnotations?.results).map((entry) => toNumber(entry?.metrics?.FalsePositiveRate)))], - ].map(([metric, withValue, withoutValue]) => ({ - metric, - withAnnotations: toNumber(withValue), - withoutAnnotations: toNumber(withoutValue), - delta: toNumber(withValue) - toNumber(withoutValue), - })); - return { - comparison, - rows, - }; -} - -export function resolveEvalResultPath(resultsDir, runId) { - return resolve(resultsDir || DEFAULT_RESULTS_DIR, `${basename(String(runId || "").replace(/\.json$/i, ""))}.json`); -} - -export function 
listStoredEvaluationRuns(resultsDir = DEFAULT_RESULTS_DIR) { - const dir = resolve(resultsDir); - if (!existsSync(dir)) return []; - return readdirSync(dir) - .filter((name) => name.startsWith("eval-") && name.endsWith(".json")) - .sort() - .map((name) => ({ - runId: name.replace(/\.json$/i, ""), - path: resolve(dir, name), - })); -} - -export function detectRegression(currentRun, baselineRun, thresholds = {}) { - const maxTokenRegression = toNumber(thresholds.maxTokenRegression, Infinity); - const minPassRate = thresholds.minPassRate == null ? -Infinity : toNumber(thresholds.minPassRate); - const tokenRegression = toNumber(currentRun?.summary?.avgTokens) - toNumber(baselineRun?.summary?.avgTokens); - const tokenRegressionRatio = toNumber(baselineRun?.summary?.avgTokens) === 0 - ? (tokenRegression > 0 ? Infinity : 0) - : tokenRegression / Math.max(1e-9, toNumber(baselineRun?.summary?.avgTokens)); - const passRate = toNumber(currentRun?.summary?.passRate); - const failures = []; - if (Number.isFinite(maxTokenRegression) && tokenRegressionRatio > maxTokenRegression) { - failures.push({ - metric: "avgTokens", - actual: tokenRegressionRatio, - threshold: maxTokenRegression, - message: `Average token regression ${tokenRegressionRatio.toFixed(4)} exceeds ${maxTokenRegression.toFixed(4)}`, - }); - } - if (passRate < minPassRate) { - failures.push({ - metric: "passRate", - actual: passRate, - threshold: minPassRate, - message: `Pass rate ${passRate.toFixed(4)} is below ${minPassRate.toFixed(4)}`, - }); - } - return { - ok: failures.length === 0, - failures, - }; -} - -export function summarizeHistory(runs = []) { - const ordered = ensureArray(runs) - .map((run) => ({ - runId: run.runId, - createdAt: run.createdAt || "", - benchmark: run.benchmark || run.benchmarkId || "", - passRate: toNumber(run.summary?.passRate), - avgTokens: toNumber(run.summary?.avgTokens), - avgTimeMs: toNumber(run.summary?.avgTimeMs), - totalCostUsd: toNumber(run.summary?.totalCostUsd), - })) - 
.sort((a, b) => String(a.createdAt).localeCompare(String(b.createdAt))); - const regressions = []; - for (let index = 1; index < ordered.length; index += 1) { - const previous = ordered[index - 1]; - const current = ordered[index]; - if (current.passRate < previous.passRate || current.avgTokens > previous.avgTokens) { - regressions.push({ - fromRunId: previous.runId, - toRunId: current.runId, - passRateDelta: current.passRate - previous.passRate, - avgTokensDelta: current.avgTokens - previous.avgTokens, - }); - } - } - return { runs: ordered, regressions }; -} - -function renderMatrixTable(rows = []) { - const header = ["Config", "Pass Rate", "Avg Tokens", "Avg Time", "Cost"]; - const tableRows = ensureArray(rows).map((row) => [ - row.config, - `${(toNumber(row.passRate) * 100).toFixed(1)}%`, - Math.round(toNumber(row.avgTokens)).toString(), - `${(toNumber(row.avgTimeMs) / 1000).toFixed(1)}s`, - `$${toNumber(row.cost).toFixed(4)}`, - ]); - return [header, ...tableRows].map((cells) => `| ${cells.join(" | ")} |`).join("\n"); -} - -function printUsage() { - console.log(`Bosun evaluation framework\n\nUsage:\n bosun eval import \n bosun eval run --benchmark [--repeats N] [--config id] [--results-dir dir]\n bosun eval compare [--results-dir dir]\n bosun eval matrix --benchmark [--repeats N] [--configs a,b] [--results-dir dir]\n bosun eval audit-impact --with --without [--results-dir dir]\n bosun eval ci --baseline --candidate [--max-token-regression 0.10] [--min-pass-rate 0.85]\n bosun eval history [--results-dir dir]\n`); -} - -function getArgValue(args, flag) { - const inline = args.find((entry) => entry.startsWith(`${flag}=`)); - if (inline) return inline.slice(flag.length + 1); - const index = args.indexOf(flag); - return index >= 0 ? 
args[index + 1] : ""; -} - -function hasFlag(args, flag) { - return args.includes(flag); -} - -function parseConfigList(rawValue) { - return String(rawValue || "") - .split(",") - .map((value) => value.trim()) - .filter(Boolean); -} - -function resolveBenchmarkPath(input = "", options = {}) { - const value = String(input || "").trim(); - if (!value) return ""; - if (existsSync(resolve(value))) return resolve(value); - const benchmarksDir = resolve(options.benchmarksDir || DEFAULT_BENCHMARKS_DIR); - const candidates = [ - resolve(benchmarksDir, value), - resolve(benchmarksDir, `${value}.json`), - ]; - return candidates.find((candidate) => existsSync(candidate)) || resolve(value); -} - -function resolveRunInput(input, resultsDir) { - const value = String(input || "").trim(); - if (!value) throw new Error("Run identifier is required"); - if (existsSync(resolve(value))) return loadRunFromFile(resolve(value)); - return readRunById(resultsDir, value); -} - -function createSyntheticRunner() { - return async ({ task, strategy, repeatIndex }) => { - const promptLength = String(task?.input?.prompt || "").length; - const taskWeight = TASK_TYPES.indexOf(task?.type) + 1; - const strategyWeight = Math.max(1, String(strategy?.id || "default").length % 7); - const success = !String(strategy?.id || "").toLowerCase().includes("fail"); - return { - success, - durationMs: 1000 + (repeatIndex * 125) + (taskWeight * 200) + (strategyWeight * 50), - tokensInput: 800 + promptLength + (taskWeight * 75), - tokensOutput: 300 + (repeatIndex * 20) + (strategyWeight * 15), - filesChanged: Math.max(1, taskWeight - 1), - testsPassed: success ? Math.max(1, taskWeight) : Math.max(0, taskWeight - 1), - testsTotal: Math.max(1, taskWeight), - falsePositives: task?.type === "code-review" && success ? 0 : (task?.type === "code-review" ? 
1 : 0), - contextBytes: 2048 + promptLength, - contextBudgetBytes: 8192, - }; - }; -} - -export async function runEvalCli(args = []) { - const [command, ...rest] = args; - if (!command || command === "--help" || command === "-h") { - printUsage(); - return { exitCode: 0 }; - } - - if (command === "import") { - const filePath = rest[0]; - if (!filePath) { - console.error("Usage: bosun eval import "); - return { exitCode: 1 }; - } - const benchmark = await importBenchmarkFromFile(filePath); - console.log(`Imported benchmark ${benchmark.name}: tasks=${benchmark.tasks.length}`); - return { exitCode: 0, benchmark }; - } - - if (command === "run") { - const resultsDir = getArgValue(rest, "--results-dir") || DEFAULT_RESULTS_DIR; - const benchmarkPath = resolveBenchmarkPath(getArgValue(rest, "--benchmark"), { - benchmarksDir: getArgValue(rest, "--benchmarks-dir") || DEFAULT_BENCHMARKS_DIR, - }); - const repeats = Math.max(1, toNumber(getArgValue(rest, "--repeats"), 1)); - const configId = getArgValue(rest, "--config") || "default"; - if (!benchmarkPath) { - console.error("Usage: bosun eval run --benchmark [--repeats N] [--config id]"); - return { exitCode: 1 }; - } - const benchmark = await importBenchmarkFromFile(benchmarkPath); - const evaluator = new Evaluator({ resultsDir, runner: createSyntheticRunner() }); - const run = await evaluator.evaluate({ - benchmark, - repeats, - strategies: [{ id: configId, label: configId }], - }); - console.log(JSON.stringify({ runId: run.runId, resultPath: run.resultPath, summary: run.summary }, null, 2)); - return { exitCode: 0, run }; - } - - if (command === "compare") { - const resultsDir = getArgValue(rest, "--results-dir") || DEFAULT_RESULTS_DIR; - const [runAPath, runBPath] = rest.filter((entry) => !/^--/.test(entry)); - if (!runAPath || !runBPath) { - console.error("Usage: bosun eval compare "); - return { exitCode: 1 }; - } - const baseline = resolveRunInput(runAPath, resultsDir); - const candidate = resolveRunInput(runBPath, 
resultsDir); - const comparison = compareEvaluationRuns(baseline, candidate); - console.log(JSON.stringify(comparison, null, 2)); - return { exitCode: 0, comparison }; - } - - if (command === "matrix") { - const resultsDir = getArgValue(rest, "--results-dir") || DEFAULT_RESULTS_DIR; - const benchmarkPath = resolveBenchmarkPath(getArgValue(rest, "--benchmark"), { - benchmarksDir: getArgValue(rest, "--benchmarks-dir") || DEFAULT_BENCHMARKS_DIR, - }); - const repeats = Math.max(1, toNumber(getArgValue(rest, "--repeats"), 1)); - const configs = parseConfigList(getArgValue(rest, "--configs") || "default"); - if (!benchmarkPath) { - console.error("Usage: bosun eval matrix --benchmark [--repeats N] [--configs a,b]"); - return { exitCode: 1 }; - } - const benchmark = await importBenchmarkFromFile(benchmarkPath); - const evaluator = new Evaluator({ resultsDir, runner: createSyntheticRunner() }); - const runs = []; - for (const configId of configs) { - const run = await evaluator.evaluate({ - benchmark, - repeats, - strategies: [{ id: configId, label: configId }], - }); - runs.push(run); - } - const matrix = summarizeMatrix(runs); - console.log(renderMatrixTable(matrix.rows)); - return { exitCode: 0, runs, matrix }; - } - - if (command === "audit-impact") { - const resultsDir = getArgValue(rest, "--results-dir") || DEFAULT_RESULTS_DIR; - const withInput = getArgValue(rest, "--with"); - const withoutInput = getArgValue(rest, "--without"); - if (!withInput || !withoutInput) { - console.error("Usage: bosun eval audit-impact --with --without "); - return { exitCode: 1 }; - } - const withAnnotations = resolveRunInput(withInput, resultsDir); - const withoutAnnotations = resolveRunInput(withoutInput, resultsDir); - const impact = compareAuditImpactRuns(withAnnotations, withoutAnnotations); - console.log(JSON.stringify(impact, null, 2)); - return { exitCode: 0, impact }; - } - - if (command === "ci") { - const resultsDir = getArgValue(rest, "--results-dir") || DEFAULT_RESULTS_DIR; - 
const baselineInput = getArgValue(rest, "--baseline"); - const candidateInput = getArgValue(rest, "--candidate"); - if (!baselineInput || !candidateInput) { - console.error("Usage: bosun eval ci --baseline --candidate "); - return { exitCode: 1 }; - } - const baseline = resolveRunInput(baselineInput, resultsDir); - const candidate = resolveRunInput(candidateInput, resultsDir); - const regression = detectRegression(candidate, baseline, { - maxTokenRegression: toNumber(getArgValue(rest, "--max-token-regression"), 0.1), - minPassRate: toNumber(getArgValue(rest, "--min-pass-rate"), 0.85), - }); - if (!regression.ok) { - console.error(JSON.stringify(regression, null, 2)); - return { exitCode: 1, regression }; - } - console.log(JSON.stringify(regression, null, 2)); - return { exitCode: 0, regression }; - } - - if (command === "history") { - const resultsDir = getArgValue(rest, "--results-dir") || DEFAULT_RESULTS_DIR; - const runs = listStoredEvaluationRuns(resultsDir).map((entry) => loadRunFromFile(entry.path)); - const history = summarizeHistory(runs); - console.log(JSON.stringify(history, null, 2)); - return { exitCode: 0, history }; - } - - console.error(`Unknown eval command: ${command}`); - printUsage(); - return { exitCode: 1 }; -} diff --git a/bosun.schema.json b/bosun.schema.json index 5ead04190..288a546fa 100644 --- a/bosun.schema.json +++ b/bosun.schema.json @@ -108,10 +108,6 @@ ] }, "description": "Override reusable relative paths to link from the repo root into a managed worktree for each detected ecosystem." - }, - "worktreeSetupScript": { - "type": "string", - "description": "Global shell command (or commands joined with &&) run once in every new managed worktree across all repos, before per-repo environment commands. Per-repo environment.worktreeSetupScript overrides this." 
} } }, @@ -1700,51 +1696,6 @@ }, "telegramPollLockPath": { "type": "string" - }, - "environment": { - "type": "object", - "description": "Per-repo environment configuration for worktree setup and agent operations. Overrides global worktreeBootstrap settings for this repository.", - "additionalProperties": true, - "properties": { - "template": { - "type": "string", - "description": "Environment template identifier (e.g. 'node-ts', 'python-poetry', 'rust', 'dotnet-csharp'). Used to populate defaults." - }, - "installCommands": { - "type": "array", - "items": { "type": "string" }, - "description": "Ordered commands to install dependencies in a fresh worktree. Supports 'auto' as a single-element array to auto-detect." - }, - "startCommand": { - "type": "string", - "description": "Command to start the development server or main process." - }, - "buildCommand": { - "type": "string", - "description": "Command to produce build artifacts. Supports 'auto'." - }, - "testCommand": { - "type": "string", - "description": "Command to run the test suite. Supports 'auto'." - }, - "lintCommand": { - "type": "string", - "description": "Command to lint and style-check the codebase. Supports 'auto'." - }, - "debugCommand": { - "type": "string", - "description": "Command to start a debug session." - }, - "worktreeSetupScript": { - "type": "string", - "description": "Shell command (or commands joined with &&) run once in every new managed worktree before install commands." - }, - "sharedPaths": { - "type": "array", - "items": { "type": "string" }, - "description": "Relative paths to symlink from the repo root into each managed worktree (e.g. node_modules, vendor)." 
- } - } } } }, diff --git a/ci-job-69313532999.zip b/ci-job-69313532999.zip new file mode 100644 index 000000000..d322d2ed8 --- /dev/null +++ b/ci-job-69313532999.zip @@ -0,0 +1,210 @@ +2026-03-31T08:12:08.4575442Z Current runner version: '2.333.1' +2026-03-31T08:12:08.4599605Z ##[group]Runner Image Provisioner +2026-03-31T08:12:08.4600458Z Hosted Compute Agent +2026-03-31T08:12:08.4600994Z Version: 20260213.493 +2026-03-31T08:12:08.4601659Z Commit: 5c115507f6dd24b8de37d8bbe0bb4509d0cc0fa3 +2026-03-31T08:12:08.4602713Z Build Date: 2026-02-13T00:28:41Z +2026-03-31T08:12:08.4603397Z Worker ID: {182b776f-e7ef-4259-b14d-df52dbeda612} +2026-03-31T08:12:08.4604194Z Azure Region: westus2 +2026-03-31T08:12:08.4604745Z ##[endgroup] +2026-03-31T08:12:08.4606193Z ##[group]Operating System +2026-03-31T08:12:08.4606770Z Ubuntu +2026-03-31T08:12:08.4607567Z 24.04.4 +2026-03-31T08:12:08.4608047Z LTS +2026-03-31T08:12:08.4608575Z ##[endgroup] +2026-03-31T08:12:08.4609146Z ##[group]Runner Image +2026-03-31T08:12:08.4609708Z Image: ubuntu-24.04 +2026-03-31T08:12:08.4610219Z Version: 20260323.65.1 +2026-03-31T08:12:08.4611430Z Included Software: https://github.com/actions/runner-images/blob/ubuntu24/20260323.65/images/ubuntu/Ubuntu2404-Readme.md +2026-03-31T08:12:08.4613154Z Image Release: https://github.com/actions/runner-images/releases/tag/ubuntu24%2F20260323.65 +2026-03-31T08:12:08.4614151Z ##[endgroup] +2026-03-31T08:12:08.4615163Z ##[group]GITHUB_TOKEN Permissions +2026-03-31T08:12:08.4617066Z Contents: read +2026-03-31T08:12:08.4617608Z Metadata: read +2026-03-31T08:12:08.4618183Z ##[endgroup] +2026-03-31T08:12:08.4620186Z Secret source: Actions +2026-03-31T08:12:08.4620936Z Prepare workflow directory +2026-03-31T08:12:08.4947407Z Prepare all required actions +2026-03-31T08:12:08.4985382Z Getting action download info +2026-03-31T08:12:08.9687692Z Download action repository 'actions/checkout@v4' (SHA:34e114876b0b11c390a56381ad16ebd13914f8d5) +2026-03-31T08:12:09.0876793Z 
Download action repository 'actions/setup-node@v4' (SHA:49933ea5288caeca8642d1e84afbd3f7d6820020) +2026-03-31T08:12:09.3616121Z Complete job name: Build + Tests +2026-03-31T08:12:09.4337813Z ##[group]Run actions/checkout@v4 +2026-03-31T08:12:09.4338634Z with: +2026-03-31T08:12:09.4339038Z repository: virtengine/bosun +2026-03-31T08:12:09.4339686Z token: *** +2026-03-31T08:12:09.4340075Z ssh-strict: true +2026-03-31T08:12:09.4340503Z ssh-user: git +2026-03-31T08:12:09.4340908Z persist-credentials: true +2026-03-31T08:12:09.4341349Z clean: true +2026-03-31T08:12:09.4341747Z sparse-checkout-cone-mode: true +2026-03-31T08:12:09.4342542Z fetch-depth: 1 +2026-03-31T08:12:09.4342932Z fetch-tags: false +2026-03-31T08:12:09.4343321Z show-progress: true +2026-03-31T08:12:09.4343713Z lfs: false +2026-03-31T08:12:09.4344083Z submodules: false +2026-03-31T08:12:09.4344475Z set-safe-directory: true +2026-03-31T08:12:09.4345209Z ##[endgroup] +2026-03-31T08:12:09.5447704Z Syncing repository: virtengine/bosun +2026-03-31T08:12:09.5449411Z ##[group]Getting Git version info +2026-03-31T08:12:09.5450071Z Working directory is '/home/runner/work/bosun/bosun' +2026-03-31T08:12:09.5451073Z [command]/usr/bin/git version +2026-03-31T08:12:09.5487284Z git version 2.53.0 +2026-03-31T08:12:09.5515730Z ##[endgroup] +2026-03-31T08:12:09.5531109Z Temporarily overriding HOME='/home/runner/work/_temp/d64d0500-892f-42a3-bbf0-bc662c36db8d' before making global git config changes +2026-03-31T08:12:09.5533824Z Adding repository directory to the temporary git global config as a safe directory +2026-03-31T08:12:09.5536005Z [command]/usr/bin/git config --global --add safe.directory /home/runner/work/bosun/bosun +2026-03-31T08:12:09.5568111Z Deleting the contents of '/home/runner/work/bosun/bosun' +2026-03-31T08:12:09.5571966Z ##[group]Initializing the repository +2026-03-31T08:12:09.5576162Z [command]/usr/bin/git init /home/runner/work/bosun/bosun +2026-03-31T08:12:09.5649597Z hint: Using 'master' as the 
name for the initial branch. This default branch name +2026-03-31T08:12:09.5651278Z hint: will change to "main" in Git 3.0. To configure the initial branch name +2026-03-31T08:12:09.5654293Z hint: to use in all of your new repositories, which will suppress this warning, +2026-03-31T08:12:09.5655656Z hint: call: +2026-03-31T08:12:09.5656438Z hint: +2026-03-31T08:12:09.5657676Z hint: git config --global init.defaultBranch +2026-03-31T08:12:09.5658723Z hint: +2026-03-31T08:12:09.5659678Z hint: Names commonly chosen instead of 'master' are 'main', 'trunk' and +2026-03-31T08:12:09.5661313Z hint: 'development'. The just-created branch can be renamed via this command: +2026-03-31T08:12:09.5663277Z hint: +2026-03-31T08:12:09.5664000Z hint: git branch -m +2026-03-31T08:12:09.5664798Z hint: +2026-03-31T08:12:09.5665875Z hint: Disable this message with "git config set advice.defaultBranchName false" +2026-03-31T08:12:09.5667561Z Initialized empty Git repository in /home/runner/work/bosun/bosun/.git/ +2026-03-31T08:12:09.5670303Z [command]/usr/bin/git remote add origin https://github.com/virtengine/bosun +2026-03-31T08:12:09.5688771Z ##[endgroup] +2026-03-31T08:12:09.5690322Z ##[group]Disabling automatic garbage collection +2026-03-31T08:12:09.5693493Z [command]/usr/bin/git config --local gc.auto 0 +2026-03-31T08:12:09.5725737Z ##[endgroup] +2026-03-31T08:12:09.5727523Z ##[group]Setting up auth +2026-03-31T08:12:09.5732757Z [command]/usr/bin/git config --local --name-only --get-regexp core\.sshCommand +2026-03-31T08:12:09.5766915Z [command]/usr/bin/git submodule foreach --recursive sh -c "git config --local --name-only --get-regexp 'core\.sshCommand' && git config --local --unset-all 'core.sshCommand' || :" +2026-03-31T08:12:09.6070882Z [command]/usr/bin/git config --local --name-only --get-regexp http\.https\:\/\/github\.com\/\.extraheader +2026-03-31T08:12:09.6099212Z [command]/usr/bin/git submodule foreach --recursive sh -c "git config --local --name-only --get-regexp 
'http\.https\:\/\/github\.com\/\.extraheader' && git config --local --unset-all 'http.https://github.com/.extraheader' || :" +2026-03-31T08:12:09.6315060Z [command]/usr/bin/git config --local --name-only --get-regexp ^includeIf\.gitdir: +2026-03-31T08:12:09.6344907Z [command]/usr/bin/git submodule foreach --recursive git config --local --show-origin --name-only --get-regexp remote.origin.url +2026-03-31T08:12:09.6565425Z [command]/usr/bin/git config --local http.https://github.com/.extraheader AUTHORIZATION: basic *** +2026-03-31T08:12:09.6598867Z ##[endgroup] +2026-03-31T08:12:09.6599843Z ##[group]Fetching the repository +2026-03-31T08:12:09.6607025Z [command]/usr/bin/git -c protocol.version=2 fetch --no-tags --prune --no-recurse-submodules --depth=1 origin +543c25292a12f88977efc9bfa17079cec051e390:refs/remotes/pull/437/merge +2026-03-31T08:12:10.7984052Z From https://github.com/virtengine/bosun +2026-03-31T08:12:10.7985901Z * [new ref] 543c25292a12f88977efc9bfa17079cec051e390 -> pull/437/merge +2026-03-31T08:12:10.8014904Z ##[endgroup] +2026-03-31T08:12:10.8016264Z ##[group]Determining the checkout info +2026-03-31T08:12:10.8017814Z ##[endgroup] +2026-03-31T08:12:10.8023063Z [command]/usr/bin/git sparse-checkout disable +2026-03-31T08:12:10.8061174Z [command]/usr/bin/git config --local --unset-all extensions.worktreeConfig +2026-03-31T08:12:10.8088130Z ##[group]Checking out the ref +2026-03-31T08:12:10.8092963Z [command]/usr/bin/git checkout --progress --force refs/remotes/pull/437/merge +2026-03-31T08:12:10.9353869Z Note: switching to 'refs/remotes/pull/437/merge'. +2026-03-31T08:12:10.9354672Z +2026-03-31T08:12:10.9355278Z You are in 'detached HEAD' state. You can look around, make experimental +2026-03-31T08:12:10.9356611Z changes and commit them, and you can discard any commits you make in this +2026-03-31T08:12:10.9357937Z state without impacting any branches by switching back to a branch. 
+2026-03-31T08:12:10.9358713Z +2026-03-31T08:12:10.9359240Z If you want to create a new branch to retain commits you create, you may +2026-03-31T08:12:10.9360544Z do so (now or later) by using -c with the switch command. Example: +2026-03-31T08:12:10.9361326Z +2026-03-31T08:12:10.9361655Z git switch -c +2026-03-31T08:12:10.9362533Z +2026-03-31T08:12:10.9362916Z Or undo this operation with: +2026-03-31T08:12:10.9363417Z +2026-03-31T08:12:10.9363889Z git switch - +2026-03-31T08:12:10.9364647Z +2026-03-31T08:12:10.9365649Z Turn off this advice by setting config variable advice.detachedHead to false +2026-03-31T08:12:10.9366724Z +2026-03-31T08:12:10.9367868Z HEAD is now at 543c252 Merge d5665d1854745adf838443c2b0162cc67673514e into 66f9a9e4fbb9b3e822f4d36a49e0d6537379e769 +2026-03-31T08:12:10.9371466Z ##[endgroup] +2026-03-31T08:12:10.9402998Z [command]/usr/bin/git log -1 --format=%H +2026-03-31T08:12:10.9423936Z 543c25292a12f88977efc9bfa17079cec051e390 +2026-03-31T08:12:10.9762391Z ##[group]Run actions/setup-node@v4 +2026-03-31T08:12:10.9763579Z with: +2026-03-31T08:12:10.9764407Z node-version: 24 +2026-03-31T08:12:10.9765295Z cache: npm +2026-03-31T08:12:10.9766252Z cache-dependency-path: package-lock.json +2026-03-31T08:12:10.9767520Z always-auth: false +2026-03-31T08:12:10.9768472Z check-latest: false +2026-03-31T08:12:10.9769741Z token: *** +2026-03-31T08:12:10.9770611Z ##[endgroup] +2026-03-31T08:12:11.1666438Z Found in cache @ /opt/hostedtoolcache/node/24.14.0/x64 +2026-03-31T08:12:11.1672677Z ##[group]Environment details +2026-03-31T08:12:14.0311664Z node: v24.14.0 +2026-03-31T08:12:14.0313142Z npm: 11.9.0 +2026-03-31T08:12:14.0313526Z yarn: 1.22.22 +2026-03-31T08:12:14.0314849Z ##[endgroup] +2026-03-31T08:12:14.0333939Z [command]/opt/hostedtoolcache/node/24.14.0/x64/bin/npm config get cache +2026-03-31T08:12:14.2385682Z /home/runner/.npm +2026-03-31T08:12:14.5738325Z Cache hit for: 
node-cache-Linux-x64-npm-a4608dccb002348567ca67ca7fbaf89f2377543f3855fd295b180ae8cd05f22a +2026-03-31T08:12:15.8836801Z Received 4194304 of 469488352 (0.9%), 4.0 MBs/sec +2026-03-31T08:12:16.8847347Z Received 92274688 of 469488352 (19.7%), 44.0 MBs/sec +2026-03-31T08:12:17.8842506Z Received 239075328 of 469488352 (50.9%), 76.0 MBs/sec +2026-03-31T08:12:18.8840424Z Received 390070272 of 469488352 (83.1%), 93.0 MBs/sec +2026-03-31T08:12:19.6795226Z Received 469488352 of 469488352 (100.0%), 93.3 MBs/sec +2026-03-31T08:12:19.6796556Z Cache Size: ~448 MB (469488352 B) +2026-03-31T08:12:19.6837895Z [command]/usr/bin/tar -xf /home/runner/work/_temp/53b2b0ee-e3b8-4c12-a2b4-314fe5d2c1b3/cache.tzst -P -C /home/runner/work/bosun/bosun --use-compress-program unzstd +2026-03-31T08:12:20.5315683Z Cache restored successfully +2026-03-31T08:12:20.5535097Z Cache restored from key: node-cache-Linux-x64-npm-a4608dccb002348567ca67ca7fbaf89f2377543f3855fd295b180ae8cd05f22a +2026-03-31T08:12:20.5735170Z ##[group]Run npm ci +2026-03-31T08:12:20.5735456Z npm ci +2026-03-31T08:12:20.6929401Z shell: /usr/bin/bash -e {0} +2026-03-31T08:12:20.6929678Z ##[endgroup] +2026-03-31T08:12:26.2340834Z npm warn deprecated node-domexception@1.0.0: Use your platform's native DOMException instead +2026-03-31T08:12:31.7581361Z +2026-03-31T08:12:31.7581937Z > bosun@0.42.5 preinstall +2026-03-31T08:12:31.7586657Z > node -e "try{var r=require('child_process').execSync('npm ls -g codex-monitor --json --depth=0',{encoding:'utf8',stdio:['pipe','pipe','pipe']});var d=JSON.parse(r).dependencies;if(d&&d['codex-monitor']){console.log('\n Removing old codex-monitor package...');require('child_process').execSync('npm uninstall -g codex-monitor',{stdio:'inherit',timeout:30000});console.log(' \u2705 Migrated to bosun. 
codex-monitor aliases still work.\n')}}catch(e){}" +2026-03-31T08:12:31.7589495Z +2026-03-31T08:12:32.2983466Z +2026-03-31T08:12:32.2984165Z > bosun@0.42.5 postinstall +2026-03-31T08:12:32.2984824Z > node postinstall.mjs +2026-03-31T08:12:32.2985074Z +2026-03-31T08:12:32.3562572Z +2026-03-31T08:12:32.3563212Z > bosun@0.42.5 prepare +2026-03-31T08:12:32.3563747Z > node tools/vendor-sync.mjs +2026-03-31T08:12:32.3564035Z +2026-03-31T08:12:32.4074105Z [vendor-sync] Syncing vendor files to ui/vendor/ … +2026-03-31T08:12:32.4116197Z [vendor-sync] ✓ node_modules → preact.js +2026-03-31T08:12:32.4125956Z [vendor-sync] ✓ node_modules → preact-hooks.js +2026-03-31T08:12:32.4132263Z [vendor-sync] ✓ node_modules → preact-compat.js +2026-03-31T08:12:32.4139036Z [vendor-sync] ✓ node_modules → htm.js +2026-03-31T08:12:32.4145444Z [vendor-sync] ✓ node_modules → preact-signals-core.js +2026-03-31T08:12:32.4151867Z [vendor-sync] ✓ node_modules → preact-signals.js +2026-03-31T08:12:32.4158737Z [vendor-sync] ✓ node_modules → es-module-shims.js +2026-03-31T08:12:32.4161599Z [vendor-sync] Done — 7 vendor files ready in ui/vendor/ +2026-03-31T08:12:32.4327295Z +2026-03-31T08:12:32.4327846Z added 391 packages, and audited 392 packages in 12s +2026-03-31T08:12:32.4328282Z +2026-03-31T08:12:32.4329634Z 108 packages are looking for funding +2026-03-31T08:12:32.4331033Z run `npm fund` for details +2026-03-31T08:12:32.4366689Z +2026-03-31T08:12:32.4367130Z 2 vulnerabilities (1 moderate, 1 high) +2026-03-31T08:12:32.4367470Z +2026-03-31T08:12:32.4367620Z To address all issues, run: +2026-03-31T08:12:32.4367915Z npm audit fix +2026-03-31T08:12:32.4368054Z +2026-03-31T08:12:32.4368212Z Run `npm audit` for details. 
+2026-03-31T08:12:32.5692860Z ##[group]Run node tools/prepublish-check.mjs +2026-03-31T08:12:32.5693262Z node tools/prepublish-check.mjs +2026-03-31T08:12:32.5721515Z shell: /usr/bin/bash -e {0} +2026-03-31T08:12:32.5721752Z ##[endgroup] +2026-03-31T08:12:32.9446678Z :close: Published local imports missing from package.json files array: +2026-03-31T08:12:32.9448762Z workflow/mcp-discovery-proxy.mjs -> ../infra/windows-hidden-child-processes.mjs (infra/windows-hidden-child-processes.mjs) +2026-03-31T08:12:32.9450974Z workflow/mcp-registry.mjs -> ../infra/windows-hidden-child-processes.mjs (infra/windows-hidden-child-processes.mjs) +2026-03-31T08:12:32.9452428Z +2026-03-31T08:12:32.9453130Z Add the resolved targets to the 'files' array in package.json. +2026-03-31T08:12:32.9520563Z ##[error]Process completed with exit code 1. +2026-03-31T08:12:32.9654164Z Post job cleanup. +2026-03-31T08:12:33.0591186Z [command]/usr/bin/git version +2026-03-31T08:12:33.0626911Z git version 2.53.0 +2026-03-31T08:12:33.0670828Z Temporarily overriding HOME='/home/runner/work/_temp/e93284f7-796c-4965-a717-4cff07902629' before making global git config changes +2026-03-31T08:12:33.0672738Z Adding repository directory to the temporary git global config as a safe directory +2026-03-31T08:12:33.0684143Z [command]/usr/bin/git config --global --add safe.directory /home/runner/work/bosun/bosun +2026-03-31T08:12:33.0716596Z [command]/usr/bin/git config --local --name-only --get-regexp core\.sshCommand +2026-03-31T08:12:33.0747736Z [command]/usr/bin/git submodule foreach --recursive sh -c "git config --local --name-only --get-regexp 'core\.sshCommand' && git config --local --unset-all 'core.sshCommand' || :" +2026-03-31T08:12:33.0966113Z [command]/usr/bin/git config --local --name-only --get-regexp http\.https\:\/\/github\.com\/\.extraheader +2026-03-31T08:12:33.0986125Z http.https://github.com/.extraheader +2026-03-31T08:12:33.0998635Z [command]/usr/bin/git config --local --unset-all 
http.https://github.com/.extraheader +2026-03-31T08:12:33.1029099Z [command]/usr/bin/git submodule foreach --recursive sh -c "git config --local --name-only --get-regexp 'http\.https\:\/\/github\.com\/\.extraheader' && git config --local --unset-all 'http.https://github.com/.extraheader' || :" +2026-03-31T08:12:33.1243130Z [command]/usr/bin/git config --local --name-only --get-regexp ^includeIf\.gitdir: +2026-03-31T08:12:33.1273387Z [command]/usr/bin/git submodule foreach --recursive git config --local --show-origin --name-only --get-regexp remote.origin.url +2026-03-31T08:12:33.1593290Z Cleaning up orphan processes +2026-03-31T08:12:33.1895738Z ##[warning]Node.js 20 actions are deprecated. The following actions are running on Node.js 20 and may not work as expected: actions/checkout@v4, actions/setup-node@v4. Actions will be forced to run with Node.js 24 by default starting June 2nd, 2026. Node.js 20 will be removed from the runner on September 16th, 2026. Please check if updated versions of these actions are available that support Node.js 24. To opt into Node.js 24 now, set the FORCE_JAVASCRIPT_ACTIONS_TO_NODE24=true environment variable on the runner or in your workflow file. Once Node.js 24 becomes the default, you can temporarily opt out by setting ACTIONS_ALLOW_USE_UNSECURE_NODE_VERSION=true. 
For more information see: https://github.blog/changelog/2025-09-19-deprecation-of-node-20-on-github-actions-runners/ diff --git a/ci-job-69313533034.log b/ci-job-69313533034.log new file mode 100644 index 000000000..89da944f3 --- /dev/null +++ b/ci-job-69313533034.log @@ -0,0 +1,364 @@ +2026-03-31T08:12:08.7635919Z Current runner version: '2.333.1' +2026-03-31T08:12:08.7671200Z ##[group]Runner Image Provisioner +2026-03-31T08:12:08.7672694Z Hosted Compute Agent +2026-03-31T08:12:08.7673638Z Version: 20260213.493 +2026-03-31T08:12:08.7674578Z Commit: 5c115507f6dd24b8de37d8bbe0bb4509d0cc0fa3 +2026-03-31T08:12:08.7675762Z Build Date: 2026-02-13T00:28:41Z +2026-03-31T08:12:08.7676710Z Worker ID: {d0d7f692-3925-4ee4-9d83-cbe3f08b228f} +2026-03-31T08:12:08.7677835Z Azure Region: centralus +2026-03-31T08:12:08.7678828Z ##[endgroup] +2026-03-31T08:12:08.7681764Z ##[group]Operating System +2026-03-31T08:12:08.7682711Z Ubuntu +2026-03-31T08:12:08.7683547Z 24.04.4 +2026-03-31T08:12:08.7684308Z LTS +2026-03-31T08:12:08.7684995Z ##[endgroup] +2026-03-31T08:12:08.7686001Z ##[group]Runner Image +2026-03-31T08:12:08.7687016Z Image: ubuntu-24.04 +2026-03-31T08:12:08.7687938Z Version: 20260323.65.1 +2026-03-31T08:12:08.7690082Z Included Software: https://github.com/actions/runner-images/blob/ubuntu24/20260323.65/images/ubuntu/Ubuntu2404-Readme.md +2026-03-31T08:12:08.7692789Z Image Release: https://github.com/actions/runner-images/releases/tag/ubuntu24%2F20260323.65 +2026-03-31T08:12:08.7694411Z ##[endgroup] +2026-03-31T08:12:08.7696019Z ##[group]GITHUB_TOKEN Permissions +2026-03-31T08:12:08.7698765Z Contents: read +2026-03-31T08:12:08.7699697Z Metadata: read +2026-03-31T08:12:08.7701250Z ##[endgroup] +2026-03-31T08:12:08.7704098Z Secret source: Actions +2026-03-31T08:12:08.7705832Z Prepare workflow directory +2026-03-31T08:12:08.8076240Z Prepare all required actions +2026-03-31T08:12:08.8115858Z Getting action download info +2026-03-31T08:12:09.2126029Z Download action repository 
'actions/checkout@v4' (SHA:34e114876b0b11c390a56381ad16ebd13914f8d5) +2026-03-31T08:12:09.3683029Z Download action repository 'actions/setup-node@v4' (SHA:49933ea5288caeca8642d1e84afbd3f7d6820020) +2026-03-31T08:12:09.5526638Z Complete job name: 📋 Template Integrity +2026-03-31T08:12:09.6244453Z ##[group]Run actions/checkout@v4 +2026-03-31T08:12:09.6245388Z with: +2026-03-31T08:12:09.6245773Z repository: virtengine/bosun +2026-03-31T08:12:09.6246451Z token: *** +2026-03-31T08:12:09.6246826Z ssh-strict: true +2026-03-31T08:12:09.6247210Z ssh-user: git +2026-03-31T08:12:09.6247603Z persist-credentials: true +2026-03-31T08:12:09.6248043Z clean: true +2026-03-31T08:12:09.6248428Z sparse-checkout-cone-mode: true +2026-03-31T08:12:09.6248900Z fetch-depth: 1 +2026-03-31T08:12:09.6249271Z fetch-tags: false +2026-03-31T08:12:09.6249659Z show-progress: true +2026-03-31T08:12:09.6250062Z lfs: false +2026-03-31T08:12:09.6250425Z submodules: false +2026-03-31T08:12:09.6251027Z set-safe-directory: true +2026-03-31T08:12:09.6251737Z ##[endgroup] +2026-03-31T08:12:09.7367292Z Syncing repository: virtengine/bosun +2026-03-31T08:12:09.7369040Z ##[group]Getting Git version info +2026-03-31T08:12:09.7369709Z Working directory is '/home/runner/work/bosun/bosun' +2026-03-31T08:12:09.7371039Z [command]/usr/bin/git version +2026-03-31T08:12:09.7419451Z git version 2.53.0 +2026-03-31T08:12:09.7447910Z ##[endgroup] +2026-03-31T08:12:09.7464710Z Temporarily overriding HOME='/home/runner/work/_temp/25cbb6fc-3adb-49fd-b50d-49c157cab713' before making global git config changes +2026-03-31T08:12:09.7467090Z Adding repository directory to the temporary git global config as a safe directory +2026-03-31T08:12:09.7471641Z [command]/usr/bin/git config --global --add safe.directory /home/runner/work/bosun/bosun +2026-03-31T08:12:09.7506362Z Deleting the contents of '/home/runner/work/bosun/bosun' +2026-03-31T08:12:09.7510784Z ##[group]Initializing the repository +2026-03-31T08:12:09.7515648Z 
[command]/usr/bin/git init /home/runner/work/bosun/bosun +2026-03-31T08:12:09.7605487Z hint: Using 'master' as the name for the initial branch. This default branch name +2026-03-31T08:12:09.7607076Z hint: will change to "main" in Git 3.0. To configure the initial branch name +2026-03-31T08:12:09.7608018Z hint: to use in all of your new repositories, which will suppress this warning, +2026-03-31T08:12:09.7608751Z hint: call: +2026-03-31T08:12:09.7609311Z hint: +2026-03-31T08:12:09.7610073Z hint: git config --global init.defaultBranch +2026-03-31T08:12:09.7610934Z hint: +2026-03-31T08:12:09.7611516Z hint: Names commonly chosen instead of 'master' are 'main', 'trunk' and +2026-03-31T08:12:09.7612880Z hint: 'development'. The just-created branch can be renamed via this command: +2026-03-31T08:12:09.7613572Z hint: +2026-03-31T08:12:09.7613944Z hint: git branch -m +2026-03-31T08:12:09.7614364Z hint: +2026-03-31T08:12:09.7614945Z hint: Disable this message with "git config set advice.defaultBranchName false" +2026-03-31T08:12:09.7616046Z Initialized empty Git repository in /home/runner/work/bosun/bosun/.git/ +2026-03-31T08:12:09.7617948Z [command]/usr/bin/git remote add origin https://github.com/virtengine/bosun +2026-03-31T08:12:09.7649051Z ##[endgroup] +2026-03-31T08:12:09.7649772Z ##[group]Disabling automatic garbage collection +2026-03-31T08:12:09.7653702Z [command]/usr/bin/git config --local gc.auto 0 +2026-03-31T08:12:09.7681842Z ##[endgroup] +2026-03-31T08:12:09.7682524Z ##[group]Setting up auth +2026-03-31T08:12:09.7688862Z [command]/usr/bin/git config --local --name-only --get-regexp core\.sshCommand +2026-03-31T08:12:09.7717680Z [command]/usr/bin/git submodule foreach --recursive sh -c "git config --local --name-only --get-regexp 'core\.sshCommand' && git config --local --unset-all 'core.sshCommand' || :" +2026-03-31T08:12:09.8029205Z [command]/usr/bin/git config --local --name-only --get-regexp http\.https\:\/\/github\.com\/\.extraheader 
+2026-03-31T08:12:09.8064494Z [command]/usr/bin/git submodule foreach --recursive sh -c "git config --local --name-only --get-regexp 'http\.https\:\/\/github\.com\/\.extraheader' && git config --local --unset-all 'http.https://github.com/.extraheader' || :" +2026-03-31T08:12:09.8305684Z [command]/usr/bin/git config --local --name-only --get-regexp ^includeIf\.gitdir: +2026-03-31T08:12:09.8355840Z [command]/usr/bin/git submodule foreach --recursive git config --local --show-origin --name-only --get-regexp remote.origin.url +2026-03-31T08:12:09.8584890Z [command]/usr/bin/git config --local http.https://github.com/.extraheader AUTHORIZATION: basic *** +2026-03-31T08:12:09.8618305Z ##[endgroup] +2026-03-31T08:12:09.8619569Z ##[group]Fetching the repository +2026-03-31T08:12:09.8628032Z [command]/usr/bin/git -c protocol.version=2 fetch --no-tags --prune --no-recurse-submodules --depth=1 origin +543c25292a12f88977efc9bfa17079cec051e390:refs/remotes/pull/437/merge +2026-03-31T08:12:11.1701723Z From https://github.com/virtengine/bosun +2026-03-31T08:12:11.1704885Z * [new ref] 543c25292a12f88977efc9bfa17079cec051e390 -> pull/437/merge +2026-03-31T08:12:11.1732244Z ##[endgroup] +2026-03-31T08:12:11.1734327Z ##[group]Determining the checkout info +2026-03-31T08:12:11.1736343Z ##[endgroup] +2026-03-31T08:12:11.1740758Z [command]/usr/bin/git sparse-checkout disable +2026-03-31T08:12:11.1778612Z [command]/usr/bin/git config --local --unset-all extensions.worktreeConfig +2026-03-31T08:12:11.1804995Z ##[group]Checking out the ref +2026-03-31T08:12:11.1808904Z [command]/usr/bin/git checkout --progress --force refs/remotes/pull/437/merge +2026-03-31T08:12:11.3085252Z Note: switching to 'refs/remotes/pull/437/merge'. +2026-03-31T08:12:11.3086221Z +2026-03-31T08:12:11.3086921Z You are in 'detached HEAD' state. 
You can look around, make experimental +2026-03-31T08:12:11.3088582Z changes and commit them, and you can discard any commits you make in this +2026-03-31T08:12:11.3090192Z state without impacting any branches by switching back to a branch. +2026-03-31T08:12:11.3092097Z +2026-03-31T08:12:11.3093189Z If you want to create a new branch to retain commits you create, you may +2026-03-31T08:12:11.3095531Z do so (now or later) by using -c with the switch command. Example: +2026-03-31T08:12:11.3096859Z +2026-03-31T08:12:11.3097399Z git switch -c +2026-03-31T08:12:11.3098337Z +2026-03-31T08:12:11.3098942Z Or undo this operation with: +2026-03-31T08:12:11.3099840Z +2026-03-31T08:12:11.3100378Z git switch - +2026-03-31T08:12:11.3101485Z +2026-03-31T08:12:11.3102996Z Turn off this advice by setting config variable advice.detachedHead to false +2026-03-31T08:12:11.3104858Z +2026-03-31T08:12:11.3106817Z HEAD is now at 543c252 Merge d5665d1854745adf838443c2b0162cc67673514e into 66f9a9e4fbb9b3e822f4d36a49e0d6537379e769 +2026-03-31T08:12:11.3112988Z ##[endgroup] +2026-03-31T08:12:11.3141208Z [command]/usr/bin/git log -1 --format=%H +2026-03-31T08:12:11.3166052Z 543c25292a12f88977efc9bfa17079cec051e390 +2026-03-31T08:12:11.3507670Z ##[group]Run actions/setup-node@v4 +2026-03-31T08:12:11.3508868Z with: +2026-03-31T08:12:11.3509677Z node-version: 24 +2026-03-31T08:12:11.3510797Z cache: npm +2026-03-31T08:12:11.3511812Z cache-dependency-path: package-lock.json +2026-03-31T08:12:11.3513049Z always-auth: false +2026-03-31T08:12:11.3513964Z check-latest: false +2026-03-31T08:12:11.3515213Z token: *** +2026-03-31T08:12:11.3516033Z ##[endgroup] +2026-03-31T08:12:11.5321303Z Found in cache @ /opt/hostedtoolcache/node/24.14.0/x64 +2026-03-31T08:12:11.5328475Z ##[group]Environment details +2026-03-31T08:12:14.9034761Z node: v24.14.0 +2026-03-31T08:12:14.9035286Z npm: 11.9.0 +2026-03-31T08:12:14.9035519Z yarn: 1.22.22 +2026-03-31T08:12:14.9036356Z ##[endgroup] +2026-03-31T08:12:14.9062084Z 
[command]/opt/hostedtoolcache/node/24.14.0/x64/bin/npm config get cache +2026-03-31T08:12:15.1207183Z /home/runner/.npm +2026-03-31T08:12:15.2761741Z Cache hit for: node-cache-Linux-x64-npm-a4608dccb002348567ca67ca7fbaf89f2377543f3855fd295b180ae8cd05f22a +2026-03-31T08:12:16.4463466Z Received 62914560 of 469488352 (13.4%), 60.0 MBs/sec +2026-03-31T08:12:17.4442171Z Received 243269632 of 469488352 (51.8%), 115.9 MBs/sec +2026-03-31T08:12:18.4449787Z Received 381681664 of 469488352 (81.3%), 121.3 MBs/sec +2026-03-31T08:12:19.2565859Z Received 469488352 of 469488352 (100.0%), 117.4 MBs/sec +2026-03-31T08:12:19.2573098Z Cache Size: ~448 MB (469488352 B) +2026-03-31T08:12:19.2705841Z [command]/usr/bin/tar -xf /home/runner/work/_temp/1f237839-d261-4c2b-a79c-3fed6d12f91d/cache.tzst -P -C /home/runner/work/bosun/bosun --use-compress-program unzstd +2026-03-31T08:12:20.4083715Z Cache restored successfully +2026-03-31T08:12:20.4315921Z Cache restored from key: node-cache-Linux-x64-npm-a4608dccb002348567ca67ca7fbaf89f2377543f3855fd295b180ae8cd05f22a +2026-03-31T08:12:20.4544981Z ##[group]Run npm ci +2026-03-31T08:12:20.4545419Z npm ci +2026-03-31T08:12:20.4593325Z shell: /usr/bin/bash -e {0} +2026-03-31T08:12:20.4593751Z ##[endgroup] +2026-03-31T08:12:26.3879045Z npm warn deprecated node-domexception@1.0.0: Use your platform's native DOMException instead +2026-03-31T08:12:31.7905958Z +2026-03-31T08:12:31.7906607Z > bosun@0.42.5 preinstall +2026-03-31T08:12:31.7911009Z > node -e "try{var r=require('child_process').execSync('npm ls -g codex-monitor --json --depth=0',{encoding:'utf8',stdio:['pipe','pipe','pipe']});var d=JSON.parse(r).dependencies;if(d&&d['codex-monitor']){console.log('\n Removing old codex-monitor package...');require('child_process').execSync('npm uninstall -g codex-monitor',{stdio:'inherit',timeout:30000});console.log(' \u2705 Migrated to bosun. 
codex-monitor aliases still work.\n')}}catch(e){}" +2026-03-31T08:12:31.7913981Z +2026-03-31T08:12:32.3120339Z +2026-03-31T08:12:32.3123383Z > bosun@0.42.5 postinstall +2026-03-31T08:12:32.3125136Z > node postinstall.mjs +2026-03-31T08:12:32.3125472Z +2026-03-31T08:12:32.3738332Z +2026-03-31T08:12:32.3739462Z > bosun@0.42.5 prepare +2026-03-31T08:12:32.3740027Z > node tools/vendor-sync.mjs +2026-03-31T08:12:32.3740342Z +2026-03-31T08:12:32.4290097Z [vendor-sync] Syncing vendor files to ui/vendor/ … +2026-03-31T08:12:32.4329604Z [vendor-sync] ✓ node_modules → preact.js +2026-03-31T08:12:32.4341038Z [vendor-sync] ✓ node_modules → preact-hooks.js +2026-03-31T08:12:32.4347003Z [vendor-sync] ✓ node_modules → preact-compat.js +2026-03-31T08:12:32.4353199Z [vendor-sync] ✓ node_modules → htm.js +2026-03-31T08:12:32.4359694Z [vendor-sync] ✓ node_modules → preact-signals-core.js +2026-03-31T08:12:32.4367506Z [vendor-sync] ✓ node_modules → preact-signals.js +2026-03-31T08:12:32.4372753Z [vendor-sync] ✓ node_modules → es-module-shims.js +2026-03-31T08:12:32.4375905Z [vendor-sync] Done — 7 vendor files ready in ui/vendor/ +2026-03-31T08:12:32.4554380Z +2026-03-31T08:12:32.4555245Z added 391 packages, and audited 392 packages in 12s +2026-03-31T08:12:32.4555735Z +2026-03-31T08:12:32.4556160Z 108 packages are looking for funding +2026-03-31T08:12:32.4557004Z run `npm fund` for details +2026-03-31T08:12:32.4591999Z +2026-03-31T08:12:32.4592383Z 2 vulnerabilities (1 moderate, 1 high) +2026-03-31T08:12:32.4592735Z +2026-03-31T08:12:32.4592877Z To address all issues, run: +2026-03-31T08:12:32.4593164Z npm audit fix +2026-03-31T08:12:32.4593300Z +2026-03-31T08:12:32.4593431Z Run `npm audit` for details. 
+2026-03-31T08:12:32.5833747Z ##[group]Run node --input-type=module <<'EOF' +2026-03-31T08:12:32.5834125Z node --input-type=module <<'EOF' +2026-03-31T08:12:32.5834539Z import { WORKFLOW_TEMPLATES } from "./workflow/workflow-templates.mjs"; +2026-03-31T08:12:32.5835053Z console.log(`✅ Loaded ${WORKFLOW_TEMPLATES.length} templates`); +2026-03-31T08:12:32.5835524Z if (!WORKFLOW_TEMPLATES.length) process.exit(1); +2026-03-31T08:12:32.5835818Z EOF +2026-03-31T08:12:32.5864148Z shell: /usr/bin/bash -e {0} +2026-03-31T08:12:32.5864392Z ##[endgroup] +2026-03-31T08:12:32.6468330Z ✅ Loaded 63 templates +2026-03-31T08:12:32.6557364Z ##[group]Run node tests/sandbox/validate-template-nodes.mjs +2026-03-31T08:12:32.6557799Z node tests/sandbox/validate-template-nodes.mjs +2026-03-31T08:12:32.6586251Z shell: /usr/bin/bash -e {0} +2026-03-31T08:12:32.6586493Z ##[endgroup] +2026-03-31T08:12:32.7847480Z +2026-03-31T08:12:32.7848388Z 📋 Template Integrity Report +2026-03-31T08:12:32.7849841Z Templates : 63 +2026-03-31T08:12:32.7850272Z Nodes : 675 +2026-03-31T08:12:32.7850928Z Edges : 699 +2026-03-31T08:12:32.7851149Z +2026-03-31T08:12:32.7851481Z ⚠️ Warnings (133): +2026-03-31T08:12:32.7852661Z [template-release-drafter] Node "draft-notes" references "{{prList}}" which is not in template.variables or well-known inputs +2026-03-31T08:12:32.7855344Z [template-release-drafter] Node "draft-notes" references "{{commitLog}}" which is not in template.variables or well-known inputs +2026-03-31T08:12:32.7857279Z [template-release-drafter] Node "draft-notes" references "{{lastTag}}" which is not in template.variables or well-known inputs +2026-03-31T08:12:32.7859112Z [template-release-drafter] Node "save-draft" references "{{releaseNotes}}" which is not in template.variables or well-known inputs +2026-03-31T08:12:32.7861220Z [template-sdk-conflict-resolver] Node "launch-agent" references "{{manualFiles}}" which is not in template.variables or well-known inputs +2026-03-31T08:12:32.7863060Z 
[template-backend-agent] Node "write-tests" references "{{plan}}" which is not in template.variables or well-known inputs +2026-03-31T08:12:32.7864314Z [template-backend-agent] Node "create-pr" references "{{plan}}" which is not in template.variables or well-known inputs +2026-03-31T08:12:32.7865348Z [template-backend-agent] Node "auto-fix" references "{{plan}}" which is not in template.variables or well-known inputs +2026-03-31T08:12:32.7866222Z [template-backend-agent] Node "create-pr-retry" references "{{plan}}" which is not in template.variables or well-known inputs +2026-03-31T08:12:32.7867238Z [template-meeting-subworkflow-chain] Node "execute-child-workflow" references "{{_workflowId}}" which is not in template.variables or well-known inputs +2026-03-31T08:12:32.7868398Z [template-meeting-subworkflow-chain] Node "execute-child-workflow" references "{{_workflowRunId}}" which is not in template.variables or well-known inputs +2026-03-31T08:12:32.7869491Z [template-nightly-report] Node "generate-report" references "{{taskStats}}" which is not in template.variables or well-known inputs +2026-03-31T08:12:32.7870719Z [template-nightly-report] Node "generate-report" references "{{prStats}}" which is not in template.variables or well-known inputs +2026-03-31T08:12:32.7871961Z [template-nightly-report] Node "generate-report" references "{{agentStats}}" which is not in template.variables or well-known inputs +2026-03-31T08:12:32.7873343Z [template-nightly-report] Node "send-report" references "{{reportOutput}}" which is not in template.variables or well-known inputs +2026-03-31T08:12:32.7874364Z [template-sprint-retrospective] Node "generate-retro" references "{{taskMetrics}}" which is not in template.variables or well-known inputs +2026-03-31T08:12:32.7875417Z [template-sprint-retrospective] Node "generate-retro" references "{{prMetrics}}" which is not in template.variables or well-known inputs +2026-03-31T08:12:32.7876693Z [template-sprint-retrospective] Node 
"generate-retro" references "{{errorAnalysis}}" which is not in template.variables or well-known inputs +2026-03-31T08:12:32.7877780Z [template-sprint-retrospective] Node "generate-retro" references "{{agentStats}}" which is not in template.variables or well-known inputs +2026-03-31T08:12:32.7878837Z [template-sprint-retrospective] Node "send-report" references "{{retroOutput}}" which is not in template.variables or well-known inputs +2026-03-31T08:12:32.7879880Z [template-error-recovery] Node "analyze-error" references "{{lastError}}" which is not in template.variables or well-known inputs +2026-03-31T08:12:32.7881461Z [template-error-recovery] Node "retry-task" references "{{taskExecutorRetryPrompt}}" which is not in template.variables or well-known inputs +2026-03-31T08:12:32.7882613Z [template-error-recovery] Node "retry-task" references "{{lastError}}" which is not in template.variables or well-known inputs +2026-03-31T08:12:32.7883580Z [template-error-recovery] Node "escalate" references "{{lastError}}" which is not in template.variables or well-known inputs +2026-03-31T08:12:32.7884592Z [template-anomaly-watchdog] Node "log-intervention" references "{{anomalyType}}" which is not in template.variables or well-known inputs +2026-03-31T08:12:32.7885641Z [template-anomaly-watchdog] Node "alert-telegram" references "{{anomalyType}}" which is not in template.variables or well-known inputs +2026-03-31T08:12:32.7886693Z [template-task-finalization-guard] Node "mark-inreview" references "{{prUrl}}" which is not in template.variables or well-known inputs +2026-03-31T08:12:32.7887826Z [template-task-status-transition-manager] Node "unknown-status" references "{{targetStatus}}" which is not in template.variables or well-known inputs +2026-03-31T08:12:32.7888935Z [template-incident-response] Node "classify-incident" references "{{evidence}}" which is not in template.variables or well-known inputs +2026-03-31T08:12:32.7890031Z [template-incident-response] Node 
"create-incident-task" references "{{incidentCategory}}" which is not in template.variables or well-known inputs +2026-03-31T08:12:32.7891359Z [template-incident-response] Node "create-incident-task" references "{{evidence}}" which is not in template.variables or well-known inputs +2026-03-31T08:12:32.7892462Z [template-incident-response] Node "create-incident-task" references "{{classification}}" which is not in template.variables or well-known inputs +2026-03-31T08:12:32.7893578Z [template-incident-response] Node "assign-agent" references "{{classification}}" which is not in template.variables or well-known inputs +2026-03-31T08:12:32.7894630Z [template-incident-response] Node "assign-agent" references "{{evidence}}" which is not in template.variables or well-known inputs +2026-03-31T08:12:32.7895728Z [template-incident-response] Node "alert-critical" references "{{incidentCategory}}" which is not in template.variables or well-known inputs +2026-03-31T08:12:32.7896726Z [template-incident-response] Node "alert-critical" references "{{rootCause}}" which is not in template.variables or well-known inputs +2026-03-31T08:12:32.7898024Z [template-incident-response] Node "alert-standard" references "{{incidentCategory}}" which is not in template.variables or well-known inputs +2026-03-31T08:12:32.7899968Z [template-task-archiver] Node "archive-to-file" references "{{taskJson}}" which is not in template.variables or well-known inputs +2026-03-31T08:12:32.7901531Z [template-task-archiver] Node "cleanup-sessions" references "{{attemptId}}" which is not in template.variables or well-known inputs +2026-03-31T08:12:32.7902458Z [template-sync-engine] Node "pull-external" references "{{projectId}}" which is not in template.variables or well-known inputs +2026-03-31T08:12:32.7903356Z [template-sync-engine] Node "push-internal" references "{{projectId}}" which is not in template.variables or well-known inputs +2026-03-31T08:12:32.7904278Z [template-sync-engine] Node 
"alert-failures" references "{{lastSyncErrors}}" which is not in template.variables or well-known inputs +2026-03-31T08:12:32.7905181Z [template-sync-engine] Node "log-success" references "{{pullCount}}" which is not in template.variables or well-known inputs +2026-03-31T08:12:32.7906563Z [template-sync-engine] Node "log-success" references "{{pushCount}}" which is not in template.variables or well-known inputs +2026-03-31T08:12:32.7908200Z [template-sync-engine] Node "log-success" references "{{conflictCount}}" which is not in template.variables or well-known inputs +2026-03-31T08:12:32.7910064Z [template-sync-engine] Node "log-partial" references "{{lastSyncErrors}}" which is not in template.variables or well-known inputs +2026-03-31T08:12:32.7911997Z [template-dependency-audit] Node "create-fix-pr" references "{{_runId}}" which is not in template.variables or well-known inputs +2026-03-31T08:12:32.7913697Z [template-secret-scanner] Node "classify-secret" references "{{scanOutput}}" which is not in template.variables or well-known inputs +2026-03-31T08:12:32.7915600Z [template-task-fullstack] Node "plan-architecture" references "{{resolvedSdk}}" which is not in template.variables or well-known inputs +2026-03-31T08:12:32.7917368Z [template-task-fullstack] Node "plan-architecture" references "{{resolvedModel}}" which is not in template.variables or well-known inputs +2026-03-31T08:12:32.7919335Z [template-task-fullstack] Node "plan-architecture" references "{{agentProfile}}" which is not in template.variables or well-known inputs +2026-03-31T08:12:32.7921261Z [template-task-fullstack] Node "implement-backend" references "{{resolvedSdk}}" which is not in template.variables or well-known inputs +2026-03-31T08:12:32.7923145Z [template-task-fullstack] Node "implement-backend" references "{{resolvedModel}}" which is not in template.variables or well-known inputs +2026-03-31T08:12:32.7924804Z [template-task-fullstack] Node "implement-backend" references 
"{{agentProfile}}" which is not in template.variables or well-known inputs +2026-03-31T08:12:32.7926497Z [template-task-fullstack] Node "implement-frontend" references "{{resolvedSdk}}" which is not in template.variables or well-known inputs +2026-03-31T08:12:32.7928370Z [template-task-fullstack] Node "implement-frontend" references "{{resolvedModel}}" which is not in template.variables or well-known inputs +2026-03-31T08:12:32.7930203Z [template-task-fullstack] Node "implement-frontend" references "{{agentProfile}}" which is not in template.variables or well-known inputs +2026-03-31T08:12:32.7932220Z [template-task-fullstack] Node "integration-test" references "{{resolvedSdk}}" which is not in template.variables or well-known inputs +2026-03-31T08:12:32.7933671Z [template-task-fullstack] Node "integration-test" references "{{resolvedModel}}" which is not in template.variables or well-known inputs +2026-03-31T08:12:32.7934660Z [template-task-fullstack] Node "integration-test" references "{{agentProfile}}" which is not in template.variables or well-known inputs +2026-03-31T08:12:32.7935560Z [template-task-backend] Node "plan" references "{{resolvedSdk}}" which is not in template.variables or well-known inputs +2026-03-31T08:12:32.7937307Z [template-task-backend] Node "plan" references "{{resolvedModel}}" which is not in template.variables or well-known inputs +2026-03-31T08:12:32.7938784Z [template-task-backend] Node "plan" references "{{agentProfile}}" which is not in template.variables or well-known inputs +2026-03-31T08:12:32.7939858Z [template-task-backend] Node "implement-tdd" references "{{resolvedSdk}}" which is not in template.variables or well-known inputs +2026-03-31T08:12:32.7941081Z [template-task-backend] Node "implement-tdd" references "{{resolvedModel}}" which is not in template.variables or well-known inputs +2026-03-31T08:12:32.7942058Z [template-task-backend] Node "implement-tdd" references "{{agentProfile}}" which is not in template.variables or 
well-known inputs +2026-03-31T08:12:32.7942994Z [template-task-backend] Node "verify" references "{{resolvedSdk}}" which is not in template.variables or well-known inputs +2026-03-31T08:12:32.7943877Z [template-task-backend] Node "verify" references "{{resolvedModel}}" which is not in template.variables or well-known inputs +2026-03-31T08:12:32.7944765Z [template-task-backend] Node "verify" references "{{agentProfile}}" which is not in template.variables or well-known inputs +2026-03-31T08:12:32.7945677Z [template-task-frontend] Node "analyse-design" references "{{resolvedSdk}}" which is not in template.variables or well-known inputs +2026-03-31T08:12:32.7946750Z [template-task-frontend] Node "analyse-design" references "{{resolvedModel}}" which is not in template.variables or well-known inputs +2026-03-31T08:12:32.7947716Z [template-task-frontend] Node "analyse-design" references "{{agentProfile}}" which is not in template.variables or well-known inputs +2026-03-31T08:12:32.7948647Z [template-task-frontend] Node "implement-ui" references "{{resolvedSdk}}" which is not in template.variables or well-known inputs +2026-03-31T08:12:32.7949560Z [template-task-frontend] Node "implement-ui" references "{{resolvedModel}}" which is not in template.variables or well-known inputs +2026-03-31T08:12:32.7950702Z [template-task-frontend] Node "implement-ui" references "{{agentProfile}}" which is not in template.variables or well-known inputs +2026-03-31T08:12:32.7951860Z [template-task-frontend] Node "verify-visual" references "{{resolvedSdk}}" which is not in template.variables or well-known inputs +2026-03-31T08:12:32.7952950Z [template-task-frontend] Node "verify-visual" references "{{resolvedModel}}" which is not in template.variables or well-known inputs +2026-03-31T08:12:32.7953907Z [template-task-frontend] Node "verify-visual" references "{{agentProfile}}" which is not in template.variables or well-known inputs +2026-03-31T08:12:32.7954804Z [template-task-debug] Node 
"reproduce" references "{{resolvedSdk}}" which is not in template.variables or well-known inputs +2026-03-31T08:12:32.7955690Z [template-task-debug] Node "reproduce" references "{{resolvedModel}}" which is not in template.variables or well-known inputs +2026-03-31T08:12:32.7956563Z [template-task-debug] Node "reproduce" references "{{agentProfile}}" which is not in template.variables or well-known inputs +2026-03-31T08:12:32.7957437Z [template-task-debug] Node "fix-and-test" references "{{resolvedSdk}}" which is not in template.variables or well-known inputs +2026-03-31T08:12:32.7958591Z [template-task-debug] Node "fix-and-test" references "{{resolvedModel}}" which is not in template.variables or well-known inputs +2026-03-31T08:12:32.7959493Z [template-task-debug] Node "fix-and-test" references "{{agentProfile}}" which is not in template.variables or well-known inputs +2026-03-31T08:12:32.7960362Z [template-task-debug] Node "verify" references "{{resolvedSdk}}" which is not in template.variables or well-known inputs +2026-03-31T08:12:32.7961498Z [template-task-debug] Node "verify" references "{{resolvedModel}}" which is not in template.variables or well-known inputs +2026-03-31T08:12:32.7962348Z [template-task-debug] Node "verify" references "{{agentProfile}}" which is not in template.variables or well-known inputs +2026-03-31T08:12:32.7963233Z [template-task-cicd] Node "plan-pipeline" references "{{resolvedSdk}}" which is not in template.variables or well-known inputs +2026-03-31T08:12:32.7964332Z [template-task-cicd] Node "plan-pipeline" references "{{resolvedModel}}" which is not in template.variables or well-known inputs +2026-03-31T08:12:32.7965232Z [template-task-cicd] Node "plan-pipeline" references "{{agentProfile}}" which is not in template.variables or well-known inputs +2026-03-31T08:12:32.7966152Z [template-task-cicd] Node "implement-pipeline" references "{{resolvedSdk}}" which is not in template.variables or well-known inputs 
+2026-03-31T08:12:32.7967092Z [template-task-cicd] Node "implement-pipeline" references "{{resolvedModel}}" which is not in template.variables or well-known inputs +2026-03-31T08:12:32.7968030Z [template-task-cicd] Node "implement-pipeline" references "{{agentProfile}}" which is not in template.variables or well-known inputs +2026-03-31T08:12:32.7968951Z [template-task-cicd] Node "verify-pipeline" references "{{resolvedSdk}}" which is not in template.variables or well-known inputs +2026-03-31T08:12:32.7969859Z [template-task-cicd] Node "verify-pipeline" references "{{resolvedModel}}" which is not in template.variables or well-known inputs +2026-03-31T08:12:32.7971021Z [template-task-cicd] Node "verify-pipeline" references "{{agentProfile}}" which is not in template.variables or well-known inputs +2026-03-31T08:12:32.7972112Z [template-task-design] Node "analyse-requirements" references "{{resolvedSdk}}" which is not in template.variables or well-known inputs +2026-03-31T08:12:32.7973113Z [template-task-design] Node "analyse-requirements" references "{{resolvedModel}}" which is not in template.variables or well-known inputs +2026-03-31T08:12:32.7974088Z [template-task-design] Node "analyse-requirements" references "{{agentProfile}}" which is not in template.variables or well-known inputs +2026-03-31T08:12:32.7975034Z [template-task-design] Node "implement-design" references "{{resolvedSdk}}" which is not in template.variables or well-known inputs +2026-03-31T08:12:32.7975972Z [template-task-design] Node "implement-design" references "{{resolvedModel}}" which is not in template.variables or well-known inputs +2026-03-31T08:12:32.7976919Z [template-task-design] Node "implement-design" references "{{agentProfile}}" which is not in template.variables or well-known inputs +2026-03-31T08:12:32.7977849Z [template-task-design] Node "verify-design" references "{{resolvedSdk}}" which is not in template.variables or well-known inputs +2026-03-31T08:12:32.7978754Z 
[template-task-design] Node "verify-design" references "{{resolvedModel}}" which is not in template.variables or well-known inputs +2026-03-31T08:12:32.7979664Z [template-task-design] Node "verify-design" references "{{agentProfile}}" which is not in template.variables or well-known inputs +2026-03-31T08:12:32.7980883Z [template-task-lifecycle] Node "acquire-worktree" references "{{repoRoot}}" which is not in template.variables or well-known inputs +2026-03-31T08:12:32.7981969Z [template-task-lifecycle] Node "resolve-executor" references "{{repoRoot}}" which is not in template.variables or well-known inputs +2026-03-31T08:12:32.7982925Z [template-task-lifecycle] Node "resolve-executor" references "{{workspace}}" which is not in template.variables or well-known inputs +2026-03-31T08:12:32.7983902Z [template-task-lifecycle] Node "read-workflow-contract" references "{{repoRoot}}" which is not in template.variables or well-known inputs +2026-03-31T08:12:32.7984922Z [template-task-lifecycle] Node "workflow-contract-validation" references "{{repoRoot}}" which is not in template.variables or well-known inputs +2026-03-31T08:12:32.7985892Z [template-task-lifecycle] Node "build-prompt" references "{{repoRoot}}" which is not in template.variables or well-known inputs +2026-03-31T08:12:32.7986785Z [template-task-lifecycle] Node "build-prompt" references "{{repoSlug}}" which is not in template.variables or well-known inputs +2026-03-31T08:12:32.7987689Z [template-task-lifecycle] Node "build-prompt" references "{{workspace}}" which is not in template.variables or well-known inputs +2026-03-31T08:12:32.7988741Z [template-task-lifecycle] Node "build-prompt" references "{{repository}}" which is not in template.variables or well-known inputs +2026-03-31T08:12:32.7989662Z [template-task-lifecycle] Node "build-prompt" references "{{repositories}}" which is not in template.variables or well-known inputs +2026-03-31T08:12:32.7990858Z [template-task-lifecycle] Node "run-agent-plan" 
references "{{_taskPrompt}}" which is not in template.variables or well-known inputs +2026-03-31T08:12:32.7991833Z [template-task-lifecycle] Node "run-agent-plan" references "{{resolvedSdk}}" which is not in template.variables or well-known inputs +2026-03-31T08:12:32.7992778Z [template-task-lifecycle] Node "run-agent-plan" references "{{resolvedModel}}" which is not in template.variables or well-known inputs +2026-03-31T08:12:32.7993730Z [template-task-lifecycle] Node "run-agent-plan" references "{{agentProfile}}" which is not in template.variables or well-known inputs +2026-03-31T08:12:32.7994687Z [template-task-lifecycle] Node "run-agent-tests" references "{{_taskPrompt}}" which is not in template.variables or well-known inputs +2026-03-31T08:12:32.7995622Z [template-task-lifecycle] Node "run-agent-tests" references "{{resolvedSdk}}" which is not in template.variables or well-known inputs +2026-03-31T08:12:32.7996746Z [template-task-lifecycle] Node "run-agent-tests" references "{{resolvedModel}}" which is not in template.variables or well-known inputs +2026-03-31T08:12:32.7997710Z [template-task-lifecycle] Node "run-agent-tests" references "{{agentProfile}}" which is not in template.variables or well-known inputs +2026-03-31T08:12:32.7998675Z [template-task-lifecycle] Node "run-agent-implement" references "{{_taskPrompt}}" which is not in template.variables or well-known inputs +2026-03-31T08:12:32.7999647Z [template-task-lifecycle] Node "run-agent-implement" references "{{resolvedSdk}}" which is not in template.variables or well-known inputs +2026-03-31T08:12:32.8000832Z [template-task-lifecycle] Node "run-agent-implement" references "{{resolvedModel}}" which is not in template.variables or well-known inputs +2026-03-31T08:12:32.8001842Z [template-task-lifecycle] Node "run-agent-implement" references "{{agentProfile}}" which is not in template.variables or well-known inputs +2026-03-31T08:12:32.8002803Z [template-task-lifecycle] Node "release-worktree" 
references "{{repoRoot}}" which is not in template.variables or well-known inputs +2026-03-31T08:12:32.8003729Z [template-task-lifecycle] Node "recover-worktree" references "{{repoRoot}}" which is not in template.variables or well-known inputs +2026-03-31T08:12:32.8004641Z [template-task-lifecycle] Node "retry-acquire-wt" references "{{repoRoot}}" which is not in template.variables or well-known inputs +2026-03-31T08:12:32.8005555Z [template-task-lifecycle] Node "sweep-task-wts" references "{{repoRoot}}" which is not in template.variables or well-known inputs +2026-03-31T08:12:32.8006482Z [template-research-agent] Node "generator" references "{{_previousFeedback}}" which is not in template.variables or well-known inputs +2026-03-31T08:12:32.8007419Z [template-mcp-iterative-research] Node "research" references "{{topic}}" which is not in template.variables or well-known inputs +2026-03-31T08:12:32.8007893Z +2026-03-31T08:12:32.8008317Z ✅ All integrity checks passed — 63 templates are well-formed. +2026-03-31T08:12:32.8034640Z ##[group]Run node tools/prepublish-check.mjs +2026-03-31T08:12:32.8034993Z node tools/prepublish-check.mjs +2026-03-31T08:12:32.8062261Z shell: /usr/bin/bash -e {0} +2026-03-31T08:12:32.8062505Z ##[endgroup] +2026-03-31T08:12:33.2224505Z :close: Published local imports missing from package.json files array: +2026-03-31T08:12:33.2226615Z workflow/mcp-discovery-proxy.mjs -> ../infra/windows-hidden-child-processes.mjs (infra/windows-hidden-child-processes.mjs) +2026-03-31T08:12:33.2228312Z workflow/mcp-registry.mjs -> ../infra/windows-hidden-child-processes.mjs (infra/windows-hidden-child-processes.mjs) +2026-03-31T08:12:33.2229376Z +2026-03-31T08:12:33.2229722Z Add the resolved targets to the 'files' array in package.json. +2026-03-31T08:12:33.2303212Z ##[error]Process completed with exit code 1. +2026-03-31T08:12:33.2428187Z Post job cleanup. 
+2026-03-31T08:12:33.3398613Z [command]/usr/bin/git version +2026-03-31T08:12:33.3435700Z git version 2.53.0 +2026-03-31T08:12:33.3488520Z Temporarily overriding HOME='/home/runner/work/_temp/397ed9d9-2487-4844-8a86-8fa8bb3f07d9' before making global git config changes +2026-03-31T08:12:33.3490110Z Adding repository directory to the temporary git global config as a safe directory +2026-03-31T08:12:33.3495149Z [command]/usr/bin/git config --global --add safe.directory /home/runner/work/bosun/bosun +2026-03-31T08:12:33.3531427Z [command]/usr/bin/git config --local --name-only --get-regexp core\.sshCommand +2026-03-31T08:12:33.3564667Z [command]/usr/bin/git submodule foreach --recursive sh -c "git config --local --name-only --get-regexp 'core\.sshCommand' && git config --local --unset-all 'core.sshCommand' || :" +2026-03-31T08:12:33.3795280Z [command]/usr/bin/git config --local --name-only --get-regexp http\.https\:\/\/github\.com\/\.extraheader +2026-03-31T08:12:33.3817424Z http.https://github.com/.extraheader +2026-03-31T08:12:33.3830243Z [command]/usr/bin/git config --local --unset-all http.https://github.com/.extraheader +2026-03-31T08:12:33.3861748Z [command]/usr/bin/git submodule foreach --recursive sh -c "git config --local --name-only --get-regexp 'http\.https\:\/\/github\.com\/\.extraheader' && git config --local --unset-all 'http.https://github.com/.extraheader' || :" +2026-03-31T08:12:33.4081848Z [command]/usr/bin/git config --local --name-only --get-regexp ^includeIf\.gitdir: +2026-03-31T08:12:33.4113545Z [command]/usr/bin/git submodule foreach --recursive git config --local --show-origin --name-only --get-regexp remote.origin.url +2026-03-31T08:12:33.4485684Z Cleaning up orphan processes +2026-03-31T08:12:33.4896334Z ##[warning]Node.js 20 actions are deprecated. The following actions are running on Node.js 20 and may not work as expected: actions/checkout@v4, actions/setup-node@v4. 
Actions will be forced to run with Node.js 24 by default starting June 2nd, 2026. Node.js 20 will be removed from the runner on September 16th, 2026. Please check if updated versions of these actions are available that support Node.js 24. To opt into Node.js 24 now, set the FORCE_JAVASCRIPT_ACTIONS_TO_NODE24=true environment variable on the runner or in your workflow file. Once Node.js 24 becomes the default, you can temporarily opt out by setting ACTIONS_ALLOW_USE_UNSECURE_NODE_VERSION=true. For more information see: https://github.blog/changelog/2025-09-19-deprecation-of-node-20-on-github-actions-runners/ diff --git a/cli.mjs b/cli.mjs index 15c84cfec..32ed20823 100755 --- a/cli.mjs +++ b/cli.mjs @@ -28,7 +28,6 @@ import { fileURLToPath, pathToFileURL } from "node:url"; import { execFileSync, execSync, spawn } from "node:child_process"; import os from "node:os"; import { Worker } from "node:worker_threads"; -import "./infra/windows-hidden-child-processes.mjs"; import { createDaemonCrashTracker } from "./infra/daemon-restart-policy.mjs"; import { ensureTestRuntimeSandbox } from "./infra/test-runtime.mjs"; import { @@ -86,7 +85,6 @@ function showHelp() { workflow list List declarative pipeline workflows workflow run Run a declarative pipeline workflow workflow nodes Inspect custom workflow node plugin health - eval Run agent evaluation and benchmarking tools tui Launch the terminal UI audit Run codebase annotation audit tools (scan|generate|warn|manifest|index|trim|conformity|migrate) --setup Launch the web-based setup wizard (default) @@ -111,7 +109,6 @@ function showHelp() { --no-auto-update Disable background auto-update polling --daemon, -d Run as a background daemon (detached, with PID file) --stop-daemon Stop a running daemon process - --restart Request a code reload from the running bosun instance --terminate Hard-stop all bosun processes (daemon + monitor + companions) --daemon-status Check if daemon is running @@ -177,7 +174,6 @@ function showHelp() { 
workflow run Run a declarative fresh-context workflow Run 'bosun workflow --help' for workflow CLI examples. - Run 'bosun eval --help' for evaluation CLI examples. Run 'bosun tui' to launch the terminal UI. STARTUP SERVICE @@ -332,7 +328,6 @@ const LEGACY_MONITOR_PID_FILE = resolve(__dirname, ".cache", "bosun.pid"); const DAEMON_PID_FILE = resolve(runtimeCacheDir, "bosun-daemon.pid"); const LEGACY_DAEMON_PID_FILE = resolve(__dirname, ".cache", "bosun-daemon.pid"); const DAEMON_LOG = resolve(__dirname, "logs", "daemon.log"); -const RESTART_REQUEST_FILE_NAME = "bosun-restart-request.json"; const SENTINEL_PID_FILE = resolve( runtimeCacheDir, "telegram-sentinel.pid", @@ -432,11 +427,8 @@ async function getConfiguredRuntimeCacheDirs() { try { const { loadConfig } = await import("./config/config.mjs"); const config = loadConfig(); - const configuredCacheDir = String(config?.cacheDir || "").trim() || null; - return uniqueResolvedPaths([ - runtimeCacheDir, - configuredCacheDir, - ...getRuntimeCacheDirCandidates(), + return getRuntimeCacheDirCandidates([ + String(config?.cacheDir || "").trim() || null, ]); } catch { return getRuntimeCacheDirCandidates(); @@ -504,22 +496,6 @@ function isProcessAlive(pid) { return true; } catch (err) { if (err && (err.code === "EPERM" || err.code === "EACCES")) { - if (process.platform === "win32") { - try { - const output = execFileSync( - "powershell", - [ - "-NoProfile", - "-Command", - `Get-CimInstance Win32_Process -Filter "ProcessId = ${Number(pid)}" -ErrorAction SilentlyContinue | Select-Object -ExpandProperty ProcessId`, - ], - { encoding: "utf8", stdio: ["pipe", "pipe", "pipe"], timeout: 3000, windowsHide: true }, - ).trim(); - return output === String(Number(pid)); - } catch { - return false; - } - } return true; } return false; @@ -950,7 +926,7 @@ function stopDaemon() { } } -async function daemonStatus() { +function daemonStatus() { const pid = getDaemonPid(); if (pid) { console.log(` bosun daemon is running (PID ${pid})`); @@ 
-966,8 +942,7 @@ async function daemonStatus() { } console.log(` Run --terminate to stop restart owners, then --daemon to restart.`); } else { - const configuredCacheDirs = await getConfiguredRuntimeCacheDirs(); - const existingMonitorOwner = detectExistingMonitorLockOwner(null, configuredCacheDirs); + const existingMonitorOwner = detectExistingMonitorLockOwner(); if (existingMonitorOwner) { console.log( ` bosun daemon is not running in daemon mode, but bosun monitor is active (PID ${existingMonitorOwner.pid}).`, @@ -1010,12 +985,7 @@ function findAllBosunProcessPids() { ]; const joined = patterns.join("|"); if (process.platform === "win32") { - let cmdPids = []; try { - // Primary: CommandLine-based scan. - // NOTE: Win32_Process.CommandLine can be null/empty under WMI race conditions - // for long-running processes. We re-verify null-CommandLine node.exe PIDs by - // querying them individually below to work around the WMI race. const out = execFileSync( "powershell", [ @@ -1034,51 +1004,14 @@ function findAllBosunProcessPids() { ], { encoding: "utf8", stdio: ["pipe", "pipe", "pipe"], timeout: 4000 }, ).trim(); - if (out) { - cmdPids = out - .split(/\r?\n/) - .map((s) => Number.parseInt(String(s).trim(), 10)) - .filter((pid) => Number.isFinite(pid) && pid > 0 && pid !== process.pid); - } - } catch { - // fall through to port-based fallback - } - - // Fallback: find node.exe PIDs listening on known Bosun ports (4400, 3080). - // This catches processes whose CommandLine was null in the WMI general scan. 
- let portPids = []; - try { - const netOut = execFileSync( - "powershell", - [ - "-NoProfile", - "-Command", - `$bosunPorts = @(4400,3080,4401,3081) - $listeners = Get-NetTCPConnection -State Listen -ErrorAction SilentlyContinue | - Where-Object { $bosunPorts -contains $_.LocalPort } | - Select-Object -ExpandProperty OwningProcess -Unique - $listeners | Where-Object { - $pid = [int]$_ - if ($pid -le 0 -or $pid -eq ${process.pid}) { return $false } - $proc = Get-CimInstance Win32_Process -Filter "ProcessId=$pid" -ErrorAction SilentlyContinue - $name = [string]$proc.Name - $name -match '^(node|electron|bosun)(\\.exe)?$' - }`, - ], - { encoding: "utf8", stdio: ["pipe", "pipe", "pipe"], timeout: 5000 }, - ).trim(); - if (netOut) { - portPids = netOut - .split(/\r?\n/) - .map((s) => Number.parseInt(String(s).trim(), 10)) - .filter((pid) => Number.isFinite(pid) && pid > 0 && pid !== process.pid); - } + if (!out) return []; + return out + .split(/\r?\n/) + .map((s) => Number.parseInt(String(s).trim(), 10)) + .filter((pid) => Number.isFinite(pid) && pid > 0 && pid !== process.pid); } catch { - // port scan unavailable — cmdPids is still used + return []; } - - // Merge both sets, deduplicating - return Array.from(new Set([...cmdPids, ...portPids])); } try { const out = execFileSync("pgrep", ["-f", joined], { @@ -1203,7 +1136,7 @@ function taskkillPidsElevated(pids, { force = false } = {}) { encoding: "utf8", stdio: ["ignore", "pipe", "pipe"], timeout: 30000, - windowsHide: true, + windowsHide: false, }, ); } catch { @@ -1471,8 +1404,6 @@ async function main() { console.error(` Error: ${err.message}`); process.exit(1); } - - process.exit(0); } @@ -1515,18 +1446,6 @@ async function main() { process.exit(exitCode ?? 0); } - const evalFlagIndex = args.indexOf("--eval"); - const evalCommandIndex = args.indexOf("eval"); - if (evalFlagIndex >= 0 || evalCommandIndex >= 0) { - const commandStartIndex = - evalFlagIndex >= 0 && evalCommandIndex >= 0 - ? 
Math.min(evalFlagIndex, evalCommandIndex) - : (evalCommandIndex >= 0 ? evalCommandIndex : evalFlagIndex); - const evalArgs = args.slice(commandStartIndex + 1); - const { runEvalCli } = await import("./bench/eval-framework.mjs"); - const { exitCode } = await runEvalCli(evalArgs); - process.exit(exitCode ?? 0); - } // Handle --help if (args.includes("--help") || args.includes("-h")) { showHelp(); @@ -1759,26 +1678,7 @@ async function main() { return; } if (args.includes("--daemon-status")) { - await daemonStatus(); - return; - } - if (args.includes("--restart")) { - try { - const result = await requestRunningBosunRestart("cli-restart"); - const modeLabel = result.daemon ? "daemon" : "monitor"; - console.log( - ` Requested Bosun code reload from the running ${modeLabel} instance (PID ${result.targetPid}).`, - ); - console.log( - " The live instance will restart itself with fresh modules using its current launch path.", - ); - console.log( - " If internal agent slots are busy, the reload stays queued until restart protection clears.\n", - ); - } catch (err) { - console.error(` --restart failed: ${err?.message || err}`); - process.exit(1); - } + daemonStatus(); return; } @@ -2192,12 +2092,9 @@ async function main() { // Handle --workspace-status if (args.includes("--workspace-status") || args.includes("workspace-status")) { const { getWorkspaceStateSummary } = await import("./workspace/workspace-manager.mjs"); - const { getToolOverheadReport } = await import("./agent/agent-tool-config.mjs"); const configDirArg = getArgValue("--config-dir"); const configDir = configDirArg || process.env.BOSUN_DIR || resolveConfigDirForCli(); const summary = getWorkspaceStateSummary(configDir); - const toolOverhead = getToolOverheadReport(configDir, "primary"); - const overheadSources = Object.entries(toolOverhead.bySource || {}); if (summary.length === 0) { console.log("\n No workspaces configured.\n"); } else { @@ -2215,16 +2112,8 @@ async function main() { console.log(` enabled 
workflows: ${ws.enabledWorkflows.join(", ")}`); } } + console.log(""); } - if (toolOverhead.total > 0 || overheadSources.length > 0) { - console.log(" Tool Overhead:"); - console.log(` Total tool chars: ${toolOverhead.total.toLocaleString("en-US")}`); - for (const [source, chars] of overheadSources) { - const warning = Number(chars) > 10000 ? " WARNING: high overhead" : ""; - console.log(` ${source}: ${Number(chars).toLocaleString("en-US")} chars${warning}`); - } - } - console.log(""); process.exit(0); } @@ -2425,8 +2314,7 @@ async function main() { process.exit(0); } - const configuredCacheDirs = await getConfiguredRuntimeCacheDirs(); - const existingOwner = detectExistingMonitorLockOwner(null, configuredCacheDirs); + const existingOwner = detectExistingMonitorLockOwner(); if (existingOwner) { console.log( `\n bosun is already running (PID ${existingOwner.pid}); exiting duplicate start.\n`, @@ -2522,19 +2410,13 @@ async function sendCrashNotification(exitCode, signal, options = {}) { // ── Self-restart exit code (must match monitor.mjs SELF_RESTART_EXIT_CODE) ─── const SELF_RESTART_EXIT_CODE = 75; +let monitorChild = null; function getMonitorPidFileCandidates(extraCacheDirs = []) { - return getPidFileCandidates("bosun.pid", extraCacheDirs); -} - -function getRestartRequestFileCandidates(extraCacheDirs = []) { - const cacheDirs = - Array.isArray(extraCacheDirs) && extraCacheDirs.length > 0 - ? 
uniqueResolvedPaths(extraCacheDirs) - : getRuntimeCacheDirCandidates(); - return cacheDirs.map((cacheDir) => - resolve(cacheDir, RESTART_REQUEST_FILE_NAME), - ); + return uniqueResolvedPaths([ + ...getPidFileCandidates("bosun.pid", extraCacheDirs), + resolve(__dirname, "..", ".cache", "bosun.pid"), + ]); } function tailLinesFromFile(filePath, maxLines = 200) { @@ -2608,9 +2490,9 @@ function shouldPauseDaemonRestartStorm(options) { return { pause: true, reasons: signals.reasons }; } -function detectExistingMonitorLockOwner(excludePid = null, extraCacheDirs = []) { +function detectExistingMonitorLockOwner(excludePid = null) { try { - for (const pidFile of getMonitorPidFileCandidates(extraCacheDirs)) { + for (const pidFile of getMonitorPidFileCandidates()) { let ownerPid = null; try { ownerPid = readAlivePid(pidFile); @@ -2635,56 +2517,6 @@ function detectExistingMonitorLockOwner(excludePid = null, extraCacheDirs = []) return null; } -async function requestRunningBosunRestart(reason = "cli-restart") { - const configuredCacheDirs = await getConfiguredRuntimeCacheDirs(); - const monitorOwner = detectExistingMonitorLockOwner(null, configuredCacheDirs); - const daemonPid = getDaemonPid(); - const targetPid = Number(monitorOwner?.pid || daemonPid || 0); - if (!targetPid) { - throw new Error("no running bosun instance found"); - } - - const targetCacheDirs = monitorOwner?.pidFile - ? 
[dirname(monitorOwner.pidFile)] - : configuredCacheDirs; - const requestPaths = getRestartRequestFileCandidates(targetCacheDirs); - const payload = { - id: `restart-${Date.now()}-${process.pid}`, - type: "code-reload", - reason: String(reason || "cli-restart"), - requestedAt: new Date().toISOString(), - requesterPid: process.pid, - targetPid, - argv: process.argv.slice(1), - }; - - let primaryPath = ""; - const failures = []; - for (const requestPath of requestPaths) { - try { - mkdirSync(dirname(requestPath), { recursive: true }); - writeFileSync(requestPath, JSON.stringify(payload, null, 2), "utf8"); - if (!primaryPath) primaryPath = requestPath; - } catch (err) { - failures.push(`${requestPath}: ${err?.message || err}`); - } - } - - if (!primaryPath) { - throw new Error( - `failed to write restart request (${failures.join("; ") || "unknown error"})`, - ); - } - - return { - targetPid, - requestPath: primaryPath, - requestPaths, - monitorOwner, - daemon: Boolean(daemonPid), - }; -} - function getRequiredMonitorRuntimeFiles(monitorPath) { const required = [monitorPath]; const copilotDir = resolve( @@ -2789,7 +2621,7 @@ function runMonitor({ restartReason = "" } = {}) { const exitCode = code ?? (signal ? 1 : 0); const existingOwner = !gracefulShutdown && exitCode === 1 - ? detectExistingMonitorLockOwner(childPid, [runtimeCacheDir]) + ? 
detectExistingMonitorLockOwner(childPid) : null; if (existingOwner) { console.log( @@ -2909,7 +2741,6 @@ function runMonitor({ restartReason = "" } = {}) { } // Let forked monitor handle signal cleanup — prevent parent from dying first -let monitorChild = null; let gracefulShutdown = false; process.on("SIGINT", () => { gracefulShutdown = true; diff --git a/compat.mjs b/compat.mjs index 740310c24..1f8780629 100644 --- a/compat.mjs +++ b/compat.mjs @@ -1,3 +1,4 @@ +#!/usr/bin/env node /** * compat.mjs — Backward compatibility for users migrating from codex-monitor * diff --git a/config/config.mjs b/config/config.mjs index 2eb174c6e..52c0ab13e 100644 --- a/config/config.mjs +++ b/config/config.mjs @@ -1,3 +1,5 @@ +#!/usr/bin/env node + /** * bosun — Configuration System * @@ -1732,65 +1734,6 @@ export function loadConfig(argv = process.argv, options = {}) { 0.2, { min: 0, max: 0.9 }, ), - startupGraceMs: parseBoundedInteger( - process.env.WORKFLOW_RECOVERY_STARTUP_GRACE_MS ?? - workflowRecoveryConfig.startupGraceMs, - 30_000, - { min: 0, max: 10 * 60 * 1000 }, - ), - startupStepDelayMs: parseBoundedInteger( - process.env.WORKFLOW_RECOVERY_STARTUP_STEP_DELAY_MS ?? - workflowRecoveryConfig.startupStepDelayMs, - 15_000, - { min: 0, max: 5 * 60 * 1000 }, - ), - }); - const rawMcpServersConfig = - configData.mcpServers && typeof configData.mcpServers === "object" - ? configData.mcpServers - : {}; - const mcpServers = Object.freeze({ - enabled: rawMcpServersConfig.enabled !== false, - defaultServers: Object.freeze( - Array.isArray(rawMcpServersConfig.defaultServers) - ? rawMcpServersConfig.defaultServers - .map((value) => String(value || "").trim()) - .filter(Boolean) - : [], - ), - catalogOverrides: - rawMcpServersConfig.catalogOverrides && - typeof rawMcpServersConfig.catalogOverrides === "object" - ? 
Object.freeze({ ...rawMcpServersConfig.catalogOverrides }) - : Object.freeze({}), - useDiscoveryProxy: rawMcpServersConfig.useDiscoveryProxy !== false, - includeCustomToolsInDiscoveryProxy: - rawMcpServersConfig.includeCustomToolsInDiscoveryProxy !== false, - discoveryProxyCacheTtlMs: parseBoundedInteger( - rawMcpServersConfig.discoveryProxyCacheTtlMs, - 60_000, - { min: 1000, max: 60 * 60 * 1000 }, - ), - discoveryProxyExecuteTimeoutMs: parseBoundedInteger( - rawMcpServersConfig.discoveryProxyExecuteTimeoutMs, - 10_000, - { min: 1000, max: 10 * 60 * 1000 }, - ), - autoInstallDefaults: rawMcpServersConfig.autoInstallDefaults === true, - allowExternalSources: isEnvEnabled( - process.env.BOSUN_MCP_ALLOW_EXTERNAL_SOURCES ?? - rawMcpServersConfig.allowExternalSources, - false, - ), - allowDefaultServers: isEnvEnabled( - process.env.BOSUN_MCP_ALLOW_DEFAULT_SERVERS ?? - rawMcpServersConfig.allowDefaultServers, - false, - ), - requireAuth: isEnvEnabled( - process.env.BOSUN_MCP_REQUIRE_AUTH ?? rawMcpServersConfig.requireAuth, - true, - ), }); const internalExecutor = { mode: ["internal", "hybrid"].includes(executorMode) @@ -1910,9 +1853,7 @@ export function loadConfig(argv = process.argv, options = {}) { // ── Tracing ────────────────────────────────────────────── const tracingEndpoint = process.env.BOSUN_OTEL_ENDPOINT || configData?.tracing?.endpoint || null; - const tracingEnabled = process.env.BOSUN_OTEL_ENDPOINT - ? true - : (configData?.tracing?.enabled ?? Boolean(tracingEndpoint)); + const tracingEnabled = configData?.tracing?.enabled ?? Boolean(tracingEndpoint); const tracingSampleRate = Number(configData?.tracing?.sampleRate ?? 
1); // ── Telegram ───────────────────────────────────────────── @@ -2319,14 +2260,10 @@ export function loadConfig(argv = process.argv, options = {}) { }); // ── Status file ────────────────────────────────────────── - const configuredCacheDir = - String(configData.cacheDir || selectedRepository?.cacheDir || ".cache").trim() || - ".cache"; - const cacheDirBase = - hasExplicitConfigDir && !selectedRepository?.cacheDir && !configData.cacheDir - ? configDir - : repoRoot; - const cacheDir = resolve(cacheDirBase, configuredCacheDir); + const cacheDir = resolve( + repoRoot, + configData.cacheDir || selectedRepository?.cacheDir || ".cache", + ); const statusPath = process.env.STATUS_FILE || configData.statusPath || @@ -2396,7 +2333,6 @@ export function loadConfig(argv = process.argv, options = {}) { // Internal Executor internalExecutor, workflowRecovery, - mcpServers, executorMode: internalExecutor.mode, kanban, kanbanSource, diff --git a/config/repo-config.mjs b/config/repo-config.mjs index 7316d5eda..05df67463 100644 --- a/config/repo-config.mjs +++ b/config/repo-config.mjs @@ -33,13 +33,6 @@ import { const __filename = fileURLToPath(import.meta.url); const __dirname = dirname(__filename); const _missingCodexHelpersWarned = new Set(); -const COMMON_DEFAULT_MCP_SERVER_IDS = new Set([ - "context7", - "sequential-thinking", - "playwright", - "microsoft-docs", - "microsoft_docs", -]); function warnMissingCodexHelper(name) { if (_missingCodexHelpersWarned.has(name)) return; @@ -163,47 +156,15 @@ function fallbackBuildCommonMcpBlocks() { return [ "", "[mcp_servers.context7]", - "startup_timeout_sec = 120", 'command = "npx"', 'args = ["-y", "@upstash/context7-mcp"]', "", - "[mcp_servers.sequential-thinking]", - "startup_timeout_sec = 120", - 'command = "npx"', - 'args = ["-y", "@modelcontextprotocol/server-sequential-thinking"]', - "", - "[mcp_servers.playwright]", - "startup_timeout_sec = 120", - 'command = "npx"', - 'args = ["-y", "@playwright/mcp@latest"]', - "", 
"[mcp_servers.microsoft-docs]", 'url = "https://learn.microsoft.com/api/mcp"', "", ].join("\n"); } -function shouldIncludeInstalledMcpRepoBlocks(env = process.env) { - const raw = String(env?.BOSUN_MCP_INCLUDE_INSTALLED_REPO_CONFIG || "") - .trim() - .toLowerCase(); - return ["1", "true", "yes", "on", "y"].includes(raw); -} - -function listInstalledMcpServerIds(repoRoot) { - try { - const manifestPath = resolve(repoRoot, ".bosun", "library.json"); - if (!existsSync(manifestPath)) return []; - const manifest = JSON.parse(readFileSync(manifestPath, "utf8")); - return (manifest.entries || []) - .filter((entry) => entry?.type === "mcp") - .map((entry) => String(entry?.id || "").replace(/[^a-zA-Z0-9_-]/g, "_")) - .filter(Boolean); - } catch { - return []; - } -} - /** * Build TOML blocks for all installed MCP servers in the library. * Falls back to empty string if the library is not initialized or has no MCP entries. @@ -212,8 +173,7 @@ function listInstalledMcpServerIds(repoRoot) { * @param {string} repoRoot — workspace root directory * @returns {string} — TOML blocks for installed MCP servers */ -function buildInstalledMcpBlocks(repoRoot, options = {}) { - const { skipServerIds = new Set() } = options; +function buildInstalledMcpBlocks(repoRoot) { try { const manifestPath = resolve(repoRoot, ".bosun", "library.json"); if (!existsSync(manifestPath)) return ""; @@ -227,7 +187,7 @@ function buildInstalledMcpBlocks(repoRoot, options = {}) { const safeId = String(entry.id).replace(/[^a-zA-Z0-9_-]/g, "_"); // Skip entries that are already covered by common blocks - if (skipServerIds.has(safeId)) { + if (safeId === "context7" || safeId === "microsoft-docs") { continue; } @@ -264,49 +224,6 @@ function buildInstalledMcpBlocks(repoRoot, options = {}) { } } -function stripTomlSectionByName(toml, name) { - const header = `[mcp_servers.${name}]`; - const headerIdx = toml.indexOf(header); - if (headerIdx === -1) { - return { toml, changed: false }; - } - const removeFrom = (() => { 
- const lineStart = toml.lastIndexOf("\n", headerIdx); - return lineStart === -1 ? 0 : lineStart + 1; - })(); - const afterHeader = headerIdx + header.length; - const nextSection = toml.indexOf("\n[", afterHeader); - const sectionEnd = nextSection === -1 ? toml.length : nextSection + 1; - const nextToml = `${toml.slice(0, removeFrom)}${toml.slice(sectionEnd)}`.replace(/\n{3,}/g, "\n\n"); - return { toml: nextToml, changed: nextToml !== toml }; -} - -function stripManagedRepoCodexMcpSections(toml, repoRoot, env = process.env) { - let nextToml = String(toml || ""); - if (typeof codexConfig?.stripCommonMcpServerBlocks === "function") { - nextToml = codexConfig.stripCommonMcpServerBlocks(nextToml).toml; - } - if (shouldIncludeInstalledMcpRepoBlocks(env) || !repoRoot) { - return nextToml; - } - for (const id of listInstalledMcpServerIds(repoRoot)) { - while (true) { - const stripped = stripTomlSectionByName(nextToml, id); - if (!stripped.changed) break; - nextToml = stripped.toml; - } - } - return nextToml; -} - -function stripManagedVsCodeMcpServers(existingServers = {}) { - const next = { ...(existingServers || {}) }; - for (const id of COMMON_DEFAULT_MCP_SERVER_IDS) { - delete next[id]; - } - return next; -} - function fallbackBuildAgentSdkBlock({ primary = "codex" } = {}) { const normalized = String(primary || "codex").trim().toLowerCase(); const resolved = ["codex", "copilot", "claude"].includes(normalized) @@ -436,25 +353,6 @@ function mergeArrayUnique(existing, additions) { return result; } -function normalizeClaudePermissionsAllow(values) { - const normalized = []; - const seen = new Set(); - - for (const rawValue of values || []) { - let value = String(rawValue || "").trim(); - if (!value) continue; - - if (value === "computer:*") value = "Computer:*"; - if (value === "go *") continue; - - if (seen.has(value)) continue; - seen.add(value); - normalized.push(value); - } - - return normalized; -} - /** * Check whether a TOML string contains a given section header. 
* @param {string} toml @@ -487,30 +385,6 @@ function stripDeprecatedSandboxPermissions(toml) { ); } -function stripUnsupportedMicrosoftDocsToolsConfig(toml) { - let nextToml = String(toml || ""); - for (const name of ["microsoft-docs", "microsoft_docs"]) { - const header = `[mcp_servers.${name}]`; - const headerIdx = nextToml.indexOf(header); - if (headerIdx === -1) continue; - - const afterHeader = headerIdx + header.length; - const nextSection = nextToml.indexOf("\n[", afterHeader); - const resolvedSectionEnd = nextSection === -1 ? nextToml.length : nextSection; - const section = nextToml.substring(afterHeader, resolvedSectionEnd); - const cleaned = section.replace( - /^\s*tools\s*=\s*\[[^\n]*\]\s*(?:\r?\n)?/gm, - "", - ); - - if (cleaned !== section) { - nextToml = - nextToml.substring(0, afterHeader) + cleaned + nextToml.substring(resolvedSectionEnd); - } - } - return nextToml; -} - function ensureMcpStartupTimeout(toml, name, timeoutSec = 120) { const header = `[mcp_servers.${name}]`; const headerIdx = toml.indexOf(header); @@ -538,7 +412,7 @@ function ensureMcpStartupTimeout(toml, name, timeoutSec = 120) { */ function resolveBridgePath(explicit) { if (explicit) return explicit; - return "agent/agent-hook-bridge.mjs"; + return resolve(__dirname, "agent-hook-bridge.mjs"); } // ── 1. Codex project-level config.toml ────────────────────────────────────── @@ -620,17 +494,12 @@ export function buildRepoCodexConfig(options = {}) { parts.push(""); // ── MCP servers ── - const commonMcpBlocks = buildCommonMcpBlocks(env).trim(); - if (commonMcpBlocks) { - parts.push(commonMcpBlocks); - parts.push(""); - } + parts.push(buildCommonMcpBlocks().trim()); + parts.push(""); // ── Installed library MCP servers ── - if (repoRoot && shouldIncludeInstalledMcpRepoBlocks(env)) { - const installedBlocks = buildInstalledMcpBlocks(repoRoot, { - skipServerIds: commonMcpBlocks ? 
COMMON_DEFAULT_MCP_SERVER_IDS : new Set(), - }).trim(); + if (repoRoot) { + const installedBlocks = buildInstalledMcpBlocks(repoRoot).trim(); if (installedBlocks) { parts.push(installedBlocks); parts.push(""); @@ -652,8 +521,7 @@ export function buildRepoCodexConfig(options = {}) { * @param {string} generated Full generated content * @returns {string} Merged content */ -function mergeCodexToml(existing, generated, options = {}) { - const { repoRoot = "", env = process.env } = options; +function mergeCodexToml(existing, generated) { // Extract sections from generated content // A section starts with a line matching /^\[.+\]/ and runs until the next // section header or EOF. @@ -675,13 +543,7 @@ function mergeCodexToml(existing, generated, options = {}) { if (keyMatch) topLevelKeys.push(keyMatch[1]); } - let result = stripManagedRepoCodexMcpSections( - stripUnsupportedMicrosoftDocsToolsConfig( - stripDeprecatedSandboxPermissions(existing.trimEnd()), - ), - repoRoot, - env, - ); + let result = stripDeprecatedSandboxPermissions(existing.trimEnd()); // Add missing top-level keys for (const key of topLevelKeys) { @@ -712,9 +574,7 @@ function mergeCodexToml(existing, generated, options = {}) { result = ensureMcpStartupTimeout(result, "sequential-thinking", 120); result = ensureMcpStartupTimeout(result, "playwright", 120); - return stripUnsupportedMicrosoftDocsToolsConfig( - stripDeprecatedSandboxPermissions(result), - ).trimEnd() + "\n"; + return stripDeprecatedSandboxPermissions(result).trimEnd() + "\n"; } // ── 2. 
Claude settings.local.json ─────────────────────────────────────────── @@ -739,6 +599,8 @@ const CLAUDE_PERMISSIONS_ALLOW = [ // Web access (trusted domains) "WebFetch(domain:github.com)", "WebFetch(domain:bosun.ai)", + // Go toolchain + "go *", // File editing "Edit", "MultiEdit", @@ -746,7 +608,7 @@ const CLAUDE_PERMISSIONS_ALLOW = [ "Read", "Write", // Computer tool - "Computer:*", + "computer:*", ]; /** Claude Code permission deny list (empty — we trust managed repos). */ @@ -754,7 +616,7 @@ const CLAUDE_PERMISSIONS_DENY = []; /** * Build the Claude hooks object using the bosun bridge. - * @param {string} bridgePath Repo-relative or absolute path to agent-hook-bridge.mjs + * @param {string} bridgePath Absolute path to agent-hook-bridge.mjs * @returns {object} Hooks section for settings.local.json */ function buildClaudeHooks(bridgePath) { @@ -798,7 +660,7 @@ function buildClaudeHooks(bridgePath) { * * @param {object} options * @param {string} options.repoRoot Absolute path to the repo - * @param {string} [options.bosunBridgePath] Repo-relative or absolute path to agent-hook-bridge.mjs + * @param {string} [options.bosunBridgePath] Path to agent-hook-bridge.mjs * @returns {object} JSON-serializable settings object */ export function buildRepoClaudeSettings(options = {}) { @@ -838,9 +700,7 @@ function mergeClaudeSettings(existing, generated) { const genPerms = generated.permissions || {}; base.permissions = { - allow: normalizeClaudePermissionsAllow( - mergeArrayUnique(existingPerms.allow, genPerms.allow), - ), + allow: mergeArrayUnique(existingPerms.allow, genPerms.allow), deny: genPerms.deny || [], }; @@ -877,11 +737,7 @@ export function buildRepoVsCodeSettings(options = {}) { * * @returns {object} JSON-serializable MCP config */ -export function buildRepoVsCodeMcpConfig(options = {}) { - const { env = process.env } = options; - if (!buildCommonMcpBlocks(env).trim()) { - return { mcpServers: {} }; - } +export function buildRepoVsCodeMcpConfig() { return { 
mcpServers: { context7: { @@ -936,7 +792,7 @@ export function buildRepoVsCodeMcpConfig(options = {}) { * @param {string} repoRoot Absolute path to the repo directory * @param {object} [options] * @param {string} [options.primarySdk] "codex" | "copilot" | "claude" (default: "codex") - * @param {string} [options.bosunBridgePath] Repo-relative or absolute path to agent-hook-bridge.mjs + * @param {string} [options.bosunBridgePath] Path to agent-hook-bridge.mjs * @param {object} [options.env] Environment overrides * @param {boolean} [options.dryRun] If true, return results without writing files * @returns {RepoConfigResult} @@ -969,7 +825,7 @@ export function ensureRepoConfigs(repoRoot, options = {}) { if (existsSync(configPath)) { const existing = readFileSync(configPath, "utf8"); - const merged = mergeCodexToml(existing, generated, { repoRoot: root, env }); + const merged = mergeCodexToml(existing, generated); if (merged.trimEnd() === existing.trimEnd()) { result.codexConfig.skipped = true; } else if (!dryRun) { @@ -1068,7 +924,7 @@ export function ensureRepoConfigs(repoRoot, options = {}) { const mcpPath = resolve(root, ".vscode", "mcp.json"); result.vsCodeMcp.path = mcpPath; - const generated = buildRepoVsCodeMcpConfig({ env }); + const generated = buildRepoVsCodeMcpConfig(); if (existsSync(mcpPath)) { let existing = {}; @@ -1082,14 +938,11 @@ export function ensureRepoConfigs(repoRoot, options = {}) { existing.mcpServers || existing["github.copilot.mcpServers"] || (typeof existing === "object" && !existing.mcpServers ? existing : {}); - const normalizedExistingServers = buildCommonMcpBlocks(env).trim() - ? existingServers - : stripManagedVsCodeMcpServers(existingServers); // Existing servers take priority; add missing from generated const mergedServers = { ...generated.mcpServers, - ...(typeof normalizedExistingServers === "object" ? normalizedExistingServers : {}), + ...(typeof existingServers === "object" ? 
existingServers : {}), }; const next = { mcpServers: mergedServers }; diff --git a/full-node-test.log b/full-node-test.log deleted file mode 100644 index 7ac0f32dc..000000000 --- a/full-node-test.log +++ /dev/null @@ -1,959 +0,0 @@ -✔ infra\test-runtime-guards.mjs (988.1373ms) -✔ infra\test-runtime.mjs (932.4403ms) -✔ tests\agent-custom-tools.test.mjs (955.7957ms) -▶ agent endpoint stale-pid handling - ✔ treats process-not-found taskkill output as already exited (1.0102ms) - ✔ uses spawnSync taskkill with piped stdio (0.2841ms) - ✔ skips forced kill when port owner is not a bosun process (0.1855ms) - ✔ throttles access-denied cooldown warning spam per port (0.1841ms) - ✔ skips forced kill unless conflict reaping is explicitly enabled (1.7901ms) -✔ agent endpoint stale-pid handling (4.4074ms) -▶ agent-event-bus tracing integration - ✔ records agent errors as tracing metrics (942.5773ms) -✔ agent-event-bus tracing integration (943.4846ms) -[agent-event-bus] stopped -[agent-event-bus] stopped -[agent-event-bus] started (stale-check=2000ms, log-cap=20) -[agent-event-bus] stopped -[agent-event-bus] started (stale-check=2000ms, log-cap=20) -[agent-event-bus] stopped -[agent-event-bus] stopped -[agent-event-bus] started (stale-check=2000ms, log-cap=20) -[agent-event-bus] stopped -[agent-event-bus] stopped -[agent-event-bus] stopped -[agent-event-bus] stopped -[agent-event-bus] stopped -[agent-event-bus] stopped -[agent-event-bus] stopped -[agent-event-bus] stopped -[agent-event-bus] stopped -[agent-event-bus] stopped -[agent-event-bus] stopped -[agent-event-bus] stopped -[agent-event-bus] auto-review queued for task-1 -[agent-event-bus] stopped -[agent-event-bus] stopped -[agent-event-bus] stopped -[agent-event-bus] auto-retry #1/3 for task-1 (build_failure) -[agent-event-bus] stopped -[agent-event-bus] stopped -[agent-event-bus] stopped -[agent-event-bus] stopped -[agent-event-bus] stopped -[agent-event-bus] stopped -[agent-event-bus] stopped -[agent-event-bus] listener 
error: listener boom -[agent-event-bus] stopped -[agent-event-bus] stopped -[agent-event-bus] stopped -[agent-event-bus] stopped -[agent-event-bus] stopped -[agent-event-bus] stopped -[agent-event-bus] stopped -[agent-event-bus] stopped -[agent-event-bus] stopped -[agent-event-bus] auto-retry #1/3 for task-1 (build_failure) -[agent-event-bus] stopped -[agent-event-bus] stopped -[agent-event-bus] stopped -[agent-event-bus] stopped -[agent-event-bus] stopped -[agent-event-bus] started (stale-check=2000ms, log-cap=20) -[agent-event-bus] stopped -[agent-event-bus] started (stale-check=2000ms, log-cap=20) -[agent-event-bus] stopped -[agent-event-bus] stopped -[agent-event-bus] stopped -[agent-event-bus] stopped -[agent-event-bus] stopped -[agent-event-bus] auto-retry #1/3 for task-1 (build_failure) -[agent-event-bus] stopped -[agent-event-bus] stopped -[agent-event-bus] auto-retry #1/5 for task-1 (build_failure) -[agent-event-bus] auto-retry #2/5 for task-1 (build_failure) -[agent-event-bus] task-1 reached retry threshold (2); routed to review workflow -[agent-event-bus] stopped -[agent-event-bus] cooldown 60000ms for task-1 (rate_limit) -[agent-event-bus] stopped -[agent-event-bus] auto-retry #1/3 for task-1 (build_failure) -[agent-event-bus] auto-retry #2/3 for task-1 (build_failure) -[agent-event-bus] auto-retry #3/3 for task-1 (build_failure) -[agent-event-bus] task-1 exhausted retries (3) -[agent-event-bus] auto-blocked task-1: threshold -[agent-event-bus] stopped -[agent-event-bus] auto-blocked task-1: too many errors -[agent-event-bus] stopped -[agent-event-bus] cooldown 60000ms for task-1 (rate_limit) -[agent-event-bus] auto-retry #1/3 for task-1 (build_failure) -[agent-event-bus] stopped -[agent-event-bus] manual review needed for task-1: build error 0 -[agent-event-bus] manual review needed for task-1: build error 1 -[agent-event-bus] manual review needed for task-1: build error 2 -[agent-event-bus] manual review needed for task-1: build error 3 
-[agent-event-bus] stopped -▶ agent-event-bus - ▶ createAgentEventBus - ✔ returns an AgentEventBus instance (3.6959ms) - ✔ has correct AGENT_EVENT constants (0.7267ms) - ✔ createAgentEventBus (5.0235ms) - ▶ start / stop - ✔ starts and reports status (0.887ms) - ✔ stops cleanly (0.4672ms) - ✔ is idempotent — double start (0.3857ms) - ✔ start / stop (1.9146ms) - ▶ emit - ✔ records events in the log (1.4226ms) - ✔ broadcasts to UI via broadcastUiEvent (1.3061ms) - ✔ skips broadcast when opts.skipBroadcast is true (0.5364ms) - ✔ deduplicates events within the dedup window (0.5949ms) - ✔ allows same event type after dedup window expires (0.686ms) - ✔ enforces ring buffer max size (0.7016ms) - ✔ emit (5.5301ms) - ▶ addListener - ✔ notifies external listeners (0.6094ms) - ✔ allows unsubscribing (0.3874ms) - ✔ handles listener errors gracefully (0.9161ms) - ✔ addListener (2.0336ms) - ▶ onTaskStarted - ✔ emits TASK_STARTED with task details (0.7125ms) - ✔ onTaskStarted (0.7896ms) - ▶ onTaskCompleted - ✔ emits TASK_COMPLETED (0.47ms) - ✔ triggers auto-review when review agent is available (1.4829ms) - ✔ does not trigger review on failure (0.3714ms) - ✔ onTaskCompleted (2.4619ms) - ▶ onTaskFailed - ✔ emits TASK_FAILED with error message (1.5642ms) - ✔ classifies errors when errorDetector is available (1.534ms) - ✔ onTaskFailed (3.2047ms) - ▶ onAgentComplete - ✔ emits AGENT_COMPLETE (0.4911ms) - ✔ sets task status to inreview when hasCommits (0.9148ms) - ✔ onAgentComplete (1.4912ms) - ▶ onAgentError - ✔ emits AGENT_ERROR (0.4467ms) - ✔ onAgentError (0.4998ms) - ▶ onAgentHeartbeat - ✔ emits AGENT_HEARTBEAT and updates heartbeats map (0.6142ms) - ✔ onAgentHeartbeat (0.7074ms) - ▶ onStatusChange - ✔ emits TASK_STATUS_CHANGE (0.398ms) - ✔ sends telegram on blocked status (0.6139ms) - ✔ onStatusChange (1.0978ms) - ▶ onExecutorPaused / onExecutorResumed - ✔ emits EXECUTOR_PAUSED (37.7158ms) - ✔ emits EXECUTOR_RESUMED (0.3561ms) - ✔ onExecutorPaused / onExecutorResumed (38.1616ms) - 
▶ onHookResult - ✔ emits HOOK_PASSED for passed hooks (0.3976ms) - ✔ emits HOOK_FAILED for failed hooks (0.3058ms) - ✔ onHookResult (0.784ms) - ▶ getEventLog - ✔ returns all events with no filter (0.303ms) - ✔ filters by taskId (0.3302ms) - ✔ filters by type (1.0632ms) - ✔ limits results (0.3672ms) - ✔ getEventLog (2.2147ms) - ▶ getErrorHistory - ✔ returns empty array for unknown task (0.4125ms) - ✔ records error history via classification (0.6756ms) - ✔ getErrorHistory (1.1655ms) - ▶ getErrorPatternSummary - ✔ returns empty when no errors (0.3541ms) - ✔ getErrorPatternSummary (0.4043ms) - ▶ getAgentLiveness - ✔ returns empty when no heartbeats (0.3093ms) - ✔ reports alive agents (0.3277ms) - ✔ reports stale agents after threshold (0.6985ms) - ✔ getAgentLiveness (1.4793ms) - ▶ getStatus - ✔ returns full system status (0.3851ms) - ✔ getStatus (0.4378ms) - ▶ stale agent detection - ✔ emits AGENT_STALE when heartbeat is overdue (0.6318ms) - ✔ does not emit retry queue updates when expire check makes no changes (0.4351ms) - ✔ emits retry queue updates when expire check removes queued tasks (0.5083ms) - ✔ stale agent detection (1.6811ms) - ▶ auto-actions - ✔ emits AUTO_RETRY when action is retry_with_prompt (0.7072ms) - ✔ invokes threshold hook instead of retry when threshold is reached (1.1732ms) - ✔ emits AUTO_COOLDOWN when action is cooldown (0.5272ms) - ✔ escalates to block after max retries exhausted (0.8503ms) - ✔ sends telegram on auto-block (0.6506ms) - ✔ clears cooldown on manual retry queue clear (0.7591ms) - ✔ auto-actions (4.8782ms) - ▶ pattern trend detection - ✔ detects repeated error patterns (0.8177ms) - ✔ pattern trend detection (0.8817ms) -✔ agent-event-bus (78.3085ms) -[agent-hooks] registered hook "test-prepush-1" for event "PrePush" (blocking) -[agent-hooks] registered hook "hook-f29fa43f" for event "SessionStart" -[agent-hooks] registered hook "dedup-test" for event "PrePush" -[agent-hooks] updated hook "dedup-test" for event "PrePush" 
-[agent-hooks] registered hook "sdk-wildcard" for event "SessionStart" -[agent-hooks] registered hook "remove-me" for event "PrePush" -[agent-hooks] unregistered hook "remove-me" from event "PrePush" -[agent-hooks] registered hook "pp-1" for event "PostPush" -[agent-hooks] registered hook "pp-2" for event "PostPush" -[agent-hooks] registered hook "ss-1" for event "SessionStart" -[agent-hooks] registered hook "push-1" for event "PrePush" -[agent-hooks] registered hook "exec-test" for event "SessionStart" -▶ agent-hooks - ▶ HOOK_EVENTS - ✔ should export the correct list of hook events (6.1277ms) - ✔ should be frozen (0.8084ms) - ✔ HOOK_EVENTS (7.9241ms) - ▶ TAG - ✔ should export a TAG constant (0.7168ms) - ✔ TAG (0.8961ms) - ▶ registerHook - ✔ should register a hook and return its ID (2.5782ms) - ✔ should auto-generate an ID if not provided (0.9341ms) - ✔ should throw on invalid event name (1.2303ms) - ✔ should deduplicate by ID (update instead of add) (2.3396ms) - ✔ should normalize SDK wildcards (1.0926ms) - ✔ registerHook (8.557ms) - ▶ unregisterHook - ✔ should remove a registered hook (0.7869ms) - ✔ should return false for non-existent hook (0.5828ms) - ✔ unregisterHook (1.4885ms) - ▶ getRegisteredHooks - ✔ should return hooks for a specific event (0.7259ms) - ✔ should return all hooks when no event specified (0.7701ms) - ✔ should throw on invalid event (1.0226ms) - ✔ getRegisteredHooks (2.6569ms) -[agent-hooks] registered hook "codex-only" for event "SessionStart" -[agent-hooks] registered hook "claude-only" for event "SessionStart" - ▶ executeHooks - ✔ should execute a non-blocking hook successfully (912.4453ms) -[agent-hooks] executeHooks called with unknown event: "UnknownEvent" -[agent-hooks] registered hook "fail-nonblock" for event "PostPush" - ✔ should filter hooks by SDK (787.5154ms) - ✔ should return empty array for unknown event (0.7365ms) -[agent-hooks] non-blocking hook "fail-nonblock" failed for event "PostPush" (exit 1) -[agent-hooks] registered 
hook "block-pass-1" for event "PrePush" (blocking) -[agent-hooks] registered hook "block-pass-2" for event "PrePush" (blocking) -[agent-hooks] blocking hook "block-pass-1" passed (593ms) -[agent-hooks] blocking hook "block-pass-2" passed (932ms) -[agent-hooks] all 2 blocking hook(s) passed for "PrePush" -[agent-hooks] registered hook "block-fail" for event "PreCommit" (blocking) -[agent-hooks] BLOCKING FAILURE: hook "block-fail" for event "PreCommit" — exit 42 (438ms) -[agent-hooks] 1/1 blocking hook(s) FAILED for "PreCommit" -[agent-hooks] registered hook "non-block-skip" for event "PrePR" -[agent-hooks] executeBlockingHooks called with unknown event: "NoSuchEvent" -[agent-hooks] registered hook "builtin-prepush-preflight" for event "PrePush" (blocking) -[agent-hooks] registered hook "builtin-task-complete-validation" for event "TaskComplete" (blocking) -[agent-hooks] registered hook "builtin-session-health-check" for event "SessionStart" -[agent-hooks] registered hook "builtin-prepush-fetch" for event "PrePush" -[agent-hooks] built-in hooks registered -[agent-hooks] registered hook "builtin-prepush-preflight" for event "PrePush" (blocking) -[agent-hooks] registered hook "builtin-task-complete-validation" for event "TaskComplete" (blocking) -[agent-hooks] registered hook "builtin-session-health-check" for event "SessionStart" -[agent-hooks] registered hook "builtin-prepush-fetch" for event "PrePush" -[agent-hooks] built-in hooks registered -[agent-hooks] registered hook "builtin-prepush-preflight" for event "PrePush" (blocking) -[agent-hooks] registered hook "builtin-task-complete-validation" for event "TaskComplete" (blocking) -[agent-hooks] registered hook "builtin-session-health-check" for event "SessionStart" -[agent-hooks] registered hook "builtin-prepush-fetch" for event "PrePush" -[agent-hooks] built-in hooks registered -[agent-hooks] updated hook "builtin-prepush-preflight" for event "PrePush" -[agent-hooks] updated hook "builtin-task-complete-validation" 
for event "TaskComplete" -[agent-hooks] updated hook "builtin-session-health-check" for event "SessionStart" -[agent-hooks] updated hook "builtin-prepush-fetch" for event "PrePush" -[agent-hooks] built-in hooks registered -[agent-hooks] built-in hooks disabled (mode=off) -[agent-hooks] registered hook "custom-prepush" for event "PrePush" (blocking) -[agent-hooks] skipped built-in PrePush hook (mode=auto) -[agent-hooks] registered hook "builtin-task-complete-validation" for event "TaskComplete" (blocking) -[agent-hooks] registered hook "builtin-session-health-check" for event "SessionStart" -[agent-hooks] built-in hooks registered -[agent-hooks] registered hook "custom-prepush" for event "PrePush" (blocking) -[agent-hooks] registered hook "builtin-prepush-preflight" for event "PrePush" (blocking) -[agent-hooks] registered hook "builtin-task-complete-validation" for event "TaskComplete" (blocking) -[agent-hooks] registered hook "builtin-session-health-check" for event "SessionStart" -[agent-hooks] registered hook "builtin-prepush-fetch" for event "PrePush" -[agent-hooks] built-in hooks registered -[agent-hooks] registered hook "from-file-1" for event "SessionStart" -[agent-hooks] registered hook "from-file-push" for event "PrePush" (blocking) -[agent-hooks] loaded 2 hook(s) from C:\Users\jON\Documents\source\repos\virtengine-gh\bosun\.cache\test-hooks\hooks.json -[agent-hooks] registered hook "alt-key" for event "PostPR" -[agent-hooks] loaded 1 hook(s) from C:\Users\jON\Documents\source\repos\virtengine-gh\bosun\.cache\test-hooks\hooks.json -[agent-hooks] config file not found: C:\nonexistent\path.json -[agent-hooks] invalid JSON in config file: C:\Users\jON\Documents\source\repos\virtengine-gh\bosun\.cache\test-hooks\bad.json Unexpected token 'o', "not json {{{" is not valid JSON -[agent-hooks] ignoring unknown hook event "FakeEvent" in config -[agent-hooks] registered hook "real" for event "PrePush" -[agent-hooks] loaded 1 hook(s) from 
C:\Users\jON\Documents\source\repos\virtengine-gh\bosun\.cache\test-hooks\hooks.json -[agent-hooks] registered hook "env-check" for event "PostCommit" (blocking) -[agent-hooks] blocking hook "env-check" passed for event "PostCommit" (3014ms) -[agent-hooks] registered hook "will-reset" for event "PrePush" -[agent-hooks] registered hook "also-reset" for event "SessionStart" - ✔ should handle failing non-blocking hooks gracefully (832.2375ms) - ✔ executeHooks (2533.2789ms) - ▶ executeBlockingHooks - ✔ should pass when all blocking hooks succeed (1527.1384ms) - ✔ should fail when a blocking hook returns non-zero (440.2841ms) - ✔ should skip non-blocking hooks (0.6195ms) - ✔ should return passed for unknown events (0.5155ms) - ✔ executeBlockingHooks (1968.7649ms) - ▶ registerBuiltinHooks - ✔ should register built-in PrePush and TaskComplete hooks (1.0428ms) - ✔ should register builtins with blocking=true (0.7369ms) - ✔ should be idempotent (no duplicates on re-call) (0.7369ms) - ✔ should skip builtins when mode=off (0.6354ms) - ✔ should auto-skip prepush builtin when custom prepush exists (1.0542ms) - ✔ should force builtins when mode=force even with custom hooks (0.7493ms) - ✔ registerBuiltinHooks (5.1837ms) - ▶ loadHooks - ✔ should load hooks from a config file (11.8135ms) - ✔ should return 0 for missing config file (0.6574ms) - ✔ should return 0 for invalid JSON (6.4683ms) - ✔ should support 'agentHooks' key as alternative (1.948ms) - ✔ should ignore unknown event names in config (7.9941ms) - ✔ loadHooks (29.1363ms) - ▶ environment variables - ✔ should pass VE_ env vars to hook processes (3015.4439ms) - ✔ environment variables (3015.7453ms) - ▶ resetHooks - ✔ should clear all registered hooks (0.7789ms) - ✔ resetHooks (0.8853ms) -✔ agent-hooks (7575.7348ms) -▶ agent-pool monitor-monitor thread refresh clamp - ✔ defines MONITOR_MONITOR_THREAD_REFRESH_TURNS_REMAINING constant (0.6714ms) - ✔ defaults refresh turns remaining to 5 (1.1021ms) - ✔ supports 
DEVMODE_MONITOR_MONITOR_THREAD_REFRESH_TURNS_REMAINING env override (0.1604ms) - ✔ proactively force-refreshes monitor-monitor thread when turns remaining reaches threshold (0.1881ms) - ✔ only applies refresh logic for monitor-monitor task key (0.4076ms) -✔ agent-pool monitor-monitor thread refresh clamp (4.1199ms) -▶ agent-pool node warning suppression - ✔ defines applyNodeWarningSuppressionEnv function (0.7787ms) - ✔ injects NODE_NO_WARNINGS=1 into spawned process env (1.6089ms) - ✔ supports opt-out via BOSUN_SUPPRESS_NODE_WARNINGS=0 (0.1273ms) - ✔ applies suppression to Codex/Copilot spawned processes (0.1196ms) - ✔ does not suppress warnings when BOSUN_SUPPRESS_NODE_WARNINGS is 0 (0.3252ms) -✔ agent-pool node warning suppression (3.9826ms) -✔ tests\agent-pool.test.mjs (690.0047ms) -▶ agent-prompts workspace - ✔ uses explicit prompt workspace override (7.5245ms) -[agent-prompts] prompt workspace fallback enabled: C:\Users\jON\AppData\Local\Temp\prompts-home-QKchTF\.bosun\agents (primary path failed: ENOTDIR) - ✔ falls back to HOME when primary prompt directory cannot be created (57.1335ms) - ✔ creates files with metadata hash in ensureAgentPromptWorkspace (29.3901ms) - ✔ detects missing files as updateAvailable (4.8895ms) - ✔ detects user-modified file as needsReview and not updateAvailable (21.8132ms) - 02:52:09 [markdown-safety] [audit] unsafe-orchestrator.md blocked: ignore-instructions directive, download-and-execute pipeline, website url - ✔ falls back to builtin prompts when a configured template file is blocked (764.8695ms) - ✔ applyPromptDefaultUpdates updates missing and outdated-unmodified files and skips needsReview (479.9408ms) - ✔ strips unresolved template placeholders passed as values (0.6113ms) - ✔ strips inline unresolved template placeholders inside larger values (0.2931ms) - ✔ skips custom tools context when no custom tools are registered (5.006ms) - ✔ renders custom tools context when a custom tool is registered (13.6864ms) -✔ agent-prompts 
workspace (1386.9168ms) -[agent-supervisor] stopped -[agent-supervisor] stopped -[agent-supervisor] started (assess every 60s) -[agent-supervisor] stopped -[agent-supervisor] started (assess every 60s) -[agent-supervisor] stopped -[agent-supervisor] stopped -[agent-supervisor] started (assess every 60s) -[agent-supervisor] stopped -[agent-supervisor] stopped -[agent-supervisor] stopped -[agent-supervisor] stopped -[agent-supervisor] stopped -[agent-supervisor] stopped -[agent-supervisor] stopped -[agent-supervisor] stopped -[agent-supervisor] stopped -[agent-supervisor] stopped -[agent-supervisor] stopped -[agent-supervisor] stopped -[agent-supervisor] stopped -[agent-supervisor] stopped -[agent-supervisor] stopped -[agent-supervisor] stopped -[agent-supervisor] stopped -[agent-supervisor] stopped -[agent-supervisor] stopped -[agent-supervisor] stopped -[agent-supervisor] stopped -[agent-supervisor] intervening on task-api: api_error → continue_signal (reason: Transient API failure — continue the current thread and back off for 3 minute(s) if it repeats.) -[agent-supervisor] intervening on task-api: api_error → continue_signal (reason: Transient API failure — continue the current thread and back off for 5 minute(s) if it repeats.) -[agent-supervisor] intervening on task-api: api_error → continue_signal (reason: Transient API failure — continue the current thread and back off for 5 minute(s) if it repeats.) -[agent-supervisor] stopped -[agent-supervisor] intervening on task-api-reset: api_error → continue_signal (reason: Transient API failure — continue the current thread and back off for 3 minute(s) if it repeats.) 
-[agent-supervisor] stopped -[agent-supervisor] stopped -[agent-supervisor] stopped -[agent-supervisor] stopped -[agent-supervisor] stopped -[agent-supervisor] stopped -[agent-supervisor] stopped -[agent-supervisor] stopped -[agent-supervisor] intervening on task-1: poor_quality → dispatch_fix (reason: poor_quality) -[agent-supervisor] stopped -[agent-supervisor] stopped -[agent-supervisor] intervening on task-1: idle_hard → continue_signal (reason: idle) -[agent-supervisor] stopped -[agent-supervisor] intervening on task-api-intervene: api_error → continue_signal (reason: Transient API failure — continue the current thread and back off for 3 minute(s) if it repeats.) -[agent-supervisor] stopped -[agent-supervisor] intervening on task-1: build_failure → inject_prompt (reason: build failure) -[agent-supervisor] stopped -[agent-supervisor] intervening on task-1: token_overflow → force_new_thread (reason: token overflow) -[agent-supervisor] stopped -[agent-supervisor] intervening on task-1: agent_dead → redispatch_task (reason: agent dead) -[agent-supervisor] stopped -[agent-supervisor] intervening on task-1: error_loop → block_and_notify (reason: max retries) -[agent-supervisor] stopped -[agent-supervisor] intervening on task-1: rate_limit_flood → pause_executor (reason: rate limit flood) -[agent-supervisor] stopped -[agent-supervisor] intervening on task-1: poor_quality → dispatch_fix (reason: poor_quality) -[agent-supervisor] intervening on task-1: poor_quality → dispatch_fix (reason: review rejected) -[agent-supervisor] stopped -[agent-supervisor] intervening on task-1: healthy → none (reason: healthy) -[agent-supervisor] stopped -[agent-supervisor] intervening on task-1: idle_hard → continue_signal (reason: test) -[agent-supervisor] stopped -[agent-supervisor] intervening on task-1: plan_stuck → inject_prompt (reason: plan_stuck) -[agent-supervisor] stopped -[agent-supervisor] stopped -[agent-supervisor] review approved for task-1 -[agent-supervisor] stopped 
-[agent-supervisor] intervening on task-1: poor_quality → dispatch_fix (reason: poor_quality) -[agent-supervisor] stopped -[agent-supervisor] intervening on task-1: poor_quality → dispatch_fix (reason: poor_quality) -[agent-supervisor] stopped -[agent-supervisor] review approved for task-1 -[agent-supervisor] stopped -[agent-supervisor] stopped -[agent-supervisor] stopped -[agent-supervisor] stopped -[agent-supervisor] stopped -[agent-supervisor] stopped -[agent-supervisor] stopped -[agent-supervisor] stopped -[agent-supervisor] stopped -[agent-supervisor] stopped -[agent-supervisor] started (assess every 60s) -[agent-supervisor] stopped -[agent-supervisor] stopped -[agent-supervisor] intervening on task-1: plan_stuck → inject_prompt (reason: test) -[agent-supervisor] stopped -[agent-supervisor] stopped -[agent-supervisor] stopped -[agent-supervisor] stopped -[agent-supervisor] intervening on task-1: idle_hard → continue_signal (reason: idle) -[agent-supervisor] stopped -[agent-supervisor] intervening on task-1: poor_quality → dispatch_fix (reason: poor quality) -[agent-supervisor] stopped -[agent-supervisor] stopped -[agent-supervisor] stopped -[agent-supervisor] stopped -[agent-supervisor] stopped -[agent-supervisor] stopped -[agent-supervisor] stopped -[agent-supervisor] stopped -[agent-supervisor] stopped -[agent-supervisor] stopped -[agent-supervisor] stopped -[agent-supervisor] stopped -[agent-supervisor] stopped -[agent-supervisor] stopped -[agent-supervisor] stopped -[agent-supervisor] stopped -[agent-supervisor] stopped -[agent-supervisor] review REJECTED for task-1: 1 critical, 0 major issues (quality: 70) -[agent-supervisor] review REJECTED for task-1: 1 critical, 0 major issues (quality: 70) -[agent-supervisor] intervention failed for task-1: connection lost -[agent-supervisor] review REJECTED for task-1: 1 critical, 1 major issues (quality: 55) -[agent-supervisor] review REJECTED for task-1: 1 critical, 0 major issues (quality: 70) -[agent-supervisor] 
stopped -[agent-supervisor] stopped -[agent-supervisor] stopped -[agent-supervisor] stopped -[agent-supervisor] stopped -[agent-supervisor] stopped -[agent-supervisor] stopped -[agent-supervisor] stopped -[agent-supervisor] stopped -[agent-supervisor] stopped -[agent-supervisor] stopped -[agent-supervisor] stopped -[agent-supervisor] stopped -[agent-supervisor] stopped -[agent-supervisor] stopped -[agent-supervisor] stopped -[agent-supervisor] stopped -[agent-supervisor] stopped -[agent-supervisor] stopped -[agent-supervisor] stopped -[agent-supervisor] stopped -[agent-supervisor] stopped -[agent-supervisor] stopped -[agent-supervisor] stopped -[agent-supervisor] stopped -[agent-supervisor] stopped -[agent-supervisor] stopped -[agent-supervisor] stopped -[agent-supervisor] stopped -[agent-supervisor] stopped -[agent-supervisor] stopped -[agent-supervisor] stopped -[agent-supervisor] stopped -▶ agent-supervisor - ▶ createAgentSupervisor - ✔ returns an AgentSupervisor instance (3.405ms) - ✔ exposes SITUATION and INTERVENTION enums (1.2016ms) - ✔ createAgentSupervisor (5.5863ms) - ▶ start / stop - ✔ starts without error (0.9823ms) - ✔ stops without error (0.628ms) - ✔ is idempotent on start (0.5653ms) - ✔ start / stop (2.4332ms) - ▶ assess - ✔ returns HEALTHY for no signals (1.5915ms) - ✔ detects rate_limited from error text (0.4885ms) - ✔ detects rate_limit_flood when 3+ rate limits in sequence (0.732ms) - ✔ detects api_error from ECONNREFUSED (29.5728ms) - ✔ detects token_overflow (0.9943ms) - ✔ detects session_expired (0.7321ms) - ✔ detects model_error (0.5442ms) - ✔ detects build_failure (1.0467ms) - ✔ detects test_failure (0.5758ms) - ✔ detects git_conflict (0.5721ms) - ✔ detects push_failure (0.4298ms) - ✔ detects pre_push_failure (0.9577ms) - ✔ detects no_commits from context (2.6117ms) - ✔ detects poor_quality from review result (0.8298ms) - ✔ respects situation override from context (0.4548ms) - ✔ assess (42.8573ms) - ▶ health score - ✔ returns 0-100 range 
(0.4474ms) - ✔ scores higher with no signals than with errors (0.3262ms) - ✔ health score (0.8543ms) - ▶ intervention escalation - ✔ escalates through the ladder for plan_stuck (0.4323ms) - ✔ escalates for idle_hard from continue to inject to new thread to block (0.3575ms) - ✔ does not escalate for HEALTHY (0.3199ms) - ✔ uses continue-first recovery with cooldowns for repeated api_error (2.012ms) - ✔ resets api_error recovery when the error signature changes (0.4367ms) - ✔ intervention escalation (3.7038ms) - ▶ recovery prompts - ✔ generates plan_stuck prompt mentioning task title (0.5193ms) - ✔ generates false_completion prompt (0.3887ms) - ✔ generates no_commits prompt (0.401ms) - ✔ generates commits_not_pushed prompt with branch (0.3776ms) - ✖ does not diagnose commits_not_pushed when workflow owns push lifecycle (141.3565ms) - ✔ generates tool_loop prompt (0.7111ms) - ✔ generates error_loop prompt (0.4593ms) - ✔ generates poor_quality prompt with review issues (0.7177ms) - ✔ returns null for HEALTHY (no prompt needed) (0.399ms) - ✖ recovery prompts (145.8784ms) - ▶ intervene - ✔ dispatches CONTINUE_SIGNAL (0.5794ms) - ✔ records api_error continue cooldown state (0.6836ms) - ✔ dispatches INJECT_PROMPT (0.4805ms) - ✔ dispatches FORCE_NEW_THREAD (0.436ms) - ✔ dispatches REDISPATCH_TASK (0.4112ms) - ✔ dispatches BLOCK_AND_NOTIFY with telegram (0.5625ms) - ✔ dispatches PAUSE_EXECUTOR (2.7221ms) - ✔ dispatches DISPATCH_FIX with review issues (0.9674ms) - ✔ NONE does nothing (0.9648ms) - ✔ handles errors gracefully (0.9778ms) - ✔ intervene (9.34ms) - ▶ assessAndIntervene - ✔ assesses and dispatches in one call (0.5442ms) - ✔ does not dispatch for HEALTHY (0.329ms) - ✔ assessAndIntervene (0.9548ms) - ▶ review enforcement - ✔ records approved review (0.4169ms) - ✔ records rejected review and dispatches fix (0.4048ms) - ✔ canComplete returns false when review rejected (0.4493ms) - ✔ canComplete returns true when review approved (0.3135ms) - ✔ canComplete returns true for 
untracked tasks (0.375ms) - ✔ review enforcement (2.1511ms) - ▶ verifyCompletion - ✔ returns HEALTHY for good completion (1.5764ms) - ✔ detects no commits (0.8705ms) - ✔ detects no PR (0.9399ms) - ✔ detects plan_stuck from output (1.0124ms) - ✔ detects false completion from output without commits (0.3969ms) - ✔ verifyCompletion (4.968ms) - ▶ diagnostics - ✔ getTaskDiagnostics returns null for unknown task (0.4781ms) - ✔ getTaskDiagnostics returns data after assessment (0.6857ms) - ✔ getAllDiagnostics returns all tracked tasks (1.7206ms) - ✔ getSystemHealth returns expected shape (0.6191ms) - ✔ diagnostics (3.6325ms) - ▶ resetTask - ✔ clears all state for a task (0.6008ms) - ✔ resetTask (0.6619ms) - ▶ edge cases - ✔ handles missing dispatch functions gracefully (0.6026ms) - ✔ handles missing getTask gracefully (0.3533ms) - ✔ caps situation history to 50 entries (0.445ms) - ✔ caps health scores to 20 entries (1.5836ms) - ✔ emits supervisor-intervention event to event bus (0.8249ms) - ✔ DISPATCH_FIX falls back to inject_prompt if no review issues (4.5706ms) - ✔ edge cases (8.8601ms) - ▶ situation coverage - ✔ detects rate_limited from "429 rate limit exceeded" (0.4269ms) - ✔ detects api_error from "ETIMEDOUT connecting to api" (0.6479ms) - ✔ detects token_overflow from "context too long maximum exceeded" (0.6253ms) - ✔ detects session_expired from "session expired please login" (0.3469ms) - ✔ detects model_error from "model not supported claude-x" (0.278ms) - ✔ detects api_error from "ECONNREFUSED localhost:8080" (0.2834ms) - ✔ detects api_error from "500 Internal Server Error" (0.7382ms) - ✔ detects api_error from "502 Bad Gateway" (0.3141ms) - ✔ detects api_error from "fetch failed network error" (0.8371ms) - ✔ detects rate_limited from "quota exceeded" (0.6256ms) - ✔ detects session_expired from "thread not found" (0.7538ms) - ✔ detects token_overflow from "max token exceeded" (1.9899ms) - ✔ detects pre_push_failure from "pre-push hook failed exit code 1" 
(1.1682ms) - ✔ detects push_failure from "git push failed rejected" (1.0099ms) - ✔ detects build_failure from "go build failed compilation error" (0.3142ms) - ✔ detects test_failure from "FAIL github.com/pkg/test 1.2s" (0.5224ms) - ✔ detects lint_failure from "golangci-lint error found" (4.6519ms) - ✔ detects git_conflict from "merge conflict in README.md" (0.8128ms) - ✔ situation coverage (151.652ms) - ▶ auth/config/policy/sandbox situation detection - ✔ detects AUTH_FAILURE from "invalid api key" (0.8489ms) - ✔ detects AUTH_FAILURE from "authentication_error from Anthropic" (0.5506ms) - ✔ detects AUTH_FAILURE from "401 Unauthorized on /v1/chat" (0.4935ms) - ✔ detects AUTH_FAILURE from "403 Forbidden: access denied" (0.3474ms) - ✔ detects AUTH_FAILURE from "billing_hard_limit reached" (0.4479ms) - ✔ detects AUTH_FAILURE from "insufficient_quota for org" (0.3235ms) - ✔ detects AUTH_FAILURE from "invalid credentials supplied" (0.3307ms) - ✔ detects AUTH_FAILURE from "not authorized to access this model" (0.3617ms) - ✔ detects AUTH_FAILURE from "permission_error on resource" (0.7419ms) - ✔ detects CONTENT_POLICY from "content_policy_violation in response" (0.5919ms) - ✔ detects CONTENT_POLICY from "content filter blocked the output" (0.6359ms) - ✔ detects CONTENT_POLICY from "safety_system rejected request" (0.3778ms) - ✔ detects CONTENT_POLICY from "flagged content detected in prompt" (0.424ms) - ✔ detects CONTENT_POLICY from "output blocked by safety filter" (0.3429ms) - ✔ detects CODEX_SANDBOX from "sandbox failed to initialize" (0.9203ms) - ✔ detects CODEX_SANDBOX from "bwrap error: permission denied" (0.4059ms) - ✔ detects CODEX_SANDBOX from "bubblewrap failed with EPERM" (0.5839ms) - ✔ detects CODEX_SANDBOX from "EPERM: operation not permitted on /tmp" (0.2997ms) - ✔ detects CODEX_SANDBOX from "writable_roots paths not configured" (0.3512ms) - ✔ detects CODEX_SANDBOX from "codex segfault during execution" (0.3142ms) - ✔ detects CODEX_SANDBOX from "namespace 
error in sandbox" (0.3074ms) - ✔ detects INVALID_CONFIG from "config invalid: missing EXECUTOR field" (0.4709ms) - ✔ detects INVALID_CONFIG from "config missing for agent pool" (0.3044ms) - ✔ detects INVALID_CONFIG from "misconfigured agent settings detected" (0.336ms) - ✔ detects INVALID_CONFIG from "OPENAI_API_KEY not set in environment" (0.3133ms) - ✔ detects INVALID_CONFIG from "ANTHROPIC_API_KEY not set for claude exe" (0.2783ms) - ✔ AUTH_FAILURE intervention is immediate BLOCK_AND_NOTIFY (0.5384ms) - ✔ CONTENT_POLICY intervention is immediate BLOCK_AND_NOTIFY (0.8407ms) - ✔ CODEX_SANDBOX first escalation is INJECT_PROMPT (1.6019ms) - ✔ MODEL_ERROR intervention is immediate BLOCK_AND_NOTIFY (0.4198ms) - ✔ INVALID_CONFIG intervention is immediate BLOCK_AND_NOTIFY (0.3878ms) - ✔ auth/config/policy/sandbox situation detection (16.674ms) -✖ agent-supervisor (401.5623ms) -▶ agent-work-analyzer alert throttle improvements - ✔ defines FAILED_SESSION_ALERT_MIN_COOLDOWN_MS constant at 1 hour (1.9744ms) - ✔ defines FAILED_SESSION_TRANSIENT_ALERT_MIN_COOLDOWN_MS constant at 2 hours (0.2317ms) - ✔ detects transient-only sessions separately from high-error sessions (0.1534ms) - ✔ applies separate cooldown for transient error alerts (0.1162ms) - ✔ classifies transport/reconnect storms as transient-only sessions (0.266ms) -✔ agent-work-analyzer alert throttle improvements (4.141ms) -✔ failed-session alerts use task-scoped cooldown key and 1h cooldown floor (0.7939ms) -✔ emitAlert uses cooldown key builder and per-alert cooldown window (1.6102ms) -✔ cooldowns hydrate from alert log on startup to survive restarts (0.4141ms) -✔ stale alert cooldown entries are periodically pruned to bound memory (0.285ms) -✔ processLogFile resets offset when stream log is truncated (0.8187ms) -▶ agent-work-analyzer replay window normalization - ✔ defines normalizeReplayMaxBytes function (1.8638ms) - ✔ reads AGENT_ALERT_COOLDOWN_REPLAY_MAX_BYTES from env (0.118ms) - ✔ defaults replay max bytes 
to 8MB (0.1563ms) - ✔ enforces minimum of 256KB (0.1497ms) - ✔ enforces maximum of 64MB (0.1334ms) - ✔ uses normalizeReplayMaxBytes result for cooldown hydration (0.1995ms) -✔ agent-work-analyzer replay window normalization (3.9019ms) -✔ agent-work-analyzer defaults to tailing startup log from EOF (2.5576ms) -✔ startup tail mode clears replayed in-memory sessions (1.2929ms) -✔ analyzer ignores events that do not include an attempt_id (0.185ms) -✔ alert logging uses a stable fallback scope identifier (0.1352ms) -▶ analyze-agent-work helpers - ✔ filters records by date window while preserving invalid timestamps (1.3953ms) - ✔ builds error clusters and ranks by count (0.9721ms) - ✔ builds correlation summaries with grouped attributes (5.691ms) - ✔ marks complexity as unknown when task_description is missing (1.9478ms) - ✔ produces a stable JSON payload shape for correlations (1.2463ms) -✔ analyze-agent-work helpers (12.5414ms) -▶ analyze-agent-work JSONL fixture determinism - ✔ agent-errors-sample.jsonl contains 15 records (0.3071ms) - ✔ agent-metrics-sample.jsonl contains 20 records (0.1573ms) - ✔ all JSONL error records have required fields (1.9422ms) - ✔ all JSONL metric records have required fields (1.812ms) - ✔ filterRecordsByWindow filters JSONL errors to a 7-day window (1.8203ms) - ✔ filterRecordsByWindow filters JSONL metrics to a 7-day window (0.4574ms) - ✔ buildErrorClusters from JSONL data ranks timeout as top cluster (0.6225ms) - ✔ buildErrorClusters produces at least 3 distinct fingerprints from JSONL sample (0.265ms) - ✔ buildErrorCorrelationJsonPayload returns stable shape from JSONL fixtures (15.1119ms) - ✔ each correlation entry has the expected key set (1.582ms) - ✔ clusters are sorted descending by count (0.3607ms) - ✔ normalizeErrorFingerprint extracts a stable fingerprint from similar messages (0.23ms) -✔ analyze-agent-work JSONL fixture determinism (25.5558ms) -▶ analyze-agent-work CLI - ✔ prints a ranked correlation report with executor and size 
breakdowns (886.5894ms) - ✔ emits valid JSON and applies days/top filters (572.1029ms) - ✔ exits cleanly with a no-data message when the log directory is empty (363.2489ms) -✔ analyze-agent-work CLI (1822.1778ms) -▶ AnomalyDetector - ▶ Token Overflow (P0) - ✔ detects token overflow and marks process dead (26.171ms) - ✔ stops processing lines after token overflow (dead process) (0.8296ms) - ✔ Token Overflow (P0) (27.6287ms) - ▶ Model Not Supported (P0) - ✔ warns on first failure at medium severity, kills at threshold (0.7907ms) - ✔ Model Not Supported (P0) (0.9206ms) - ▶ Stream Death (P1) - ✔ detects stream completion error (0.6952ms) - ✔ Stream Death (P1) (0.7966ms) - ▶ Tool Call Loop (P2) - ✔ detects consecutive identical tool calls (0.9076ms) - ✔ resets counter when different tool is called (0.3836ms) - ✔ escalates to HIGH at kill threshold (0.4022ms) - ✔ does NOT false-positive on different edits to the same file (0.5086ms) - ✔ DOES detect truly identical edits to the same file (real death loop) (0.5207ms) - ✔ applies elevated thresholds for iterative tools (Editing, Reading) (0.5267ms) - ✔ ignores toolCallId differences when fingerprinting (0.2585ms) - ✔ Tool Call Loop (P2) (3.8877ms) - ▶ Rebase Spiral (P1) - ✔ detects repeated rebase --continue (0.5191ms) - ✔ counts rebase --abort separately (0.2756ms) - ✔ Rebase Spiral (P1) (0.8866ms) - ▶ Git Push Loop (P2) - ✔ detects repeated git push (0.2769ms) - ✔ Git Push Loop (P2) (0.362ms) - ▶ Subagent Waste (P2) - ✔ detects excessive subagent spawning (0.277ms) - ✔ Subagent Waste (P2) (0.3534ms) - ▶ Tool Failures (P3) - ✔ detects cascading tool failures (0.4217ms) - ✔ Tool Failures (P3) (0.4813ms) - ▶ Thought Spinning (P3) - ✔ detects repeated identical thoughts (0.7936ms) - ✔ ignores short thoughts (single tokens) (0.3709ms) - ✔ ignores short streaming token fragments (portal, trust) (13.7836ms) - ✔ Thought Spinning (P3) (15.0878ms) - ▶ Session Completion - ✔ marks process dead on Done event (0.3508ms) - ✔ marks 
process dead on task_complete event (0.2947ms) - ✔ Session Completion (0.7618ms) - ▶ getStats() - ✔ returns correct statistics (0.4991ms) - ✔ tracks dead processes separately (0.2192ms) - ✔ getStats() (0.8035ms) - ▶ getStatusReport() - ✔ returns formatted HTML report (0.4869ms) - ✔ getStatusReport() (0.5352ms) - ▶ Dedup protection - ✔ does not emit duplicate anomalies within dedup window (0.3769ms) - ✔ Dedup protection (0.4303ms) - ▶ Notifications - ✔ sends Telegram notification for CRITICAL anomalies (0.2632ms) - ✔ does not send notifications for LOW severity (0.2372ms) - ✔ Notifications (0.5718ms) - ▶ Meta enrichment - ✔ captures taskTitle from metadata (0.5536ms) - ✔ Meta enrichment (1.0305ms) - ▶ resetProcess() - ✔ clears tracking state for a process (0.2522ms) - ✔ resetProcess() (0.3093ms) - ▶ Command Failure Rate (P3) - ✔ detects high command failure rate (0.7306ms) - ✔ Command Failure Rate (P3) (0.8666ms) - ▶ Kill action escalation - ✔ emits kill action for subagent waste at kill threshold (1.8024ms) - ✔ emits kill action for tool failure cascade at kill threshold (0.2943ms) - ✔ emits kill action for git push loop at kill threshold (0.219ms) - ✔ Kill action escalation (2.5493ms) - ▶ Thought spinning exclusions - ✔ excludes operational test-running thoughts from spinning detection (0.2817ms) - ✔ excludes 'waiting for' thoughts from spinning detection (0.2598ms) - ✔ still detects genuine thought spinning (non-operational) (0.2806ms) - ✔ Thought spinning exclusions (0.9065ms) -✔ AnomalyDetector (60.4294ms) -▶ createAnomalyDetector factory - ✔ creates and starts a detector (0.2652ms) -✔ createAnomalyDetector factory (0.3162ms) -▶ Circuit breaker escalation - ✔ escalates warn-only anomalies to kill after 3 dedup cycles (402.0247ms) -[anomaly-detector] circuit breaker: GIT_PUSH_LOOP fired 3x for gitpush- — escalating to KILL -[anomaly-detector] circuit breaker: GIT_PUSH_LOOP fired 4x for gitpush- — escalating to KILL - ✔ escalates git push warn to kill after 
repeated warnings (330.5557ms) -✔ Circuit breaker escalation (732.8442ms) -▶ MODEL_NOT_SUPPORTED kill at threshold - ✔ emits kill action when model failures hit kill threshold (2.6721ms) -✔ MODEL_NOT_SUPPORTED kill at threshold (2.8129ms) -▶ apply-pr-suggestions - ▶ parseSuggestions - ✔ extracts single-line suggestion from comment body (3.4492ms) - ✔ extracts multi-line suggestion (0.3098ms) - ✔ filters by author when specified (0.2651ms) - ✔ ignores comments without suggestion blocks (0.2371ms) - ✔ handles multiple suggestions in one comment (0.4184ms) - ✔ parseSuggestions (8.7536ms) - ▶ removeOverlaps - ✔ keeps non-overlapping suggestions (0.4819ms) - ✔ removes overlapping suggestions (0.2713ms) - ✔ removeOverlaps (1.5693ms) - ▶ applyToContent - ✔ applies single-line replacement (0.2726ms) - ✔ applies multi-line replacement (0.1594ms) - ✔ applies multiple non-overlapping replacements bottom-to-top (0.2027ms) - ✔ handles replacement that changes line count (0.1441ms) - ✔ applyToContent (0.9861ms) -✔ apply-pr-suggestions (12.5158ms) -▶ async safety guards - ✔ handles monitor failure promises with explicit catch guards (1.6391ms) - ✔ guards detached monitor scheduler/notifier dispatches (0.7736ms) - ✔ guards agent-work-analyzer stuck sweep interval (0.1925ms) - ✔ guards agent-pool fire-and-forget registry operations (0.2357ms) - ✔ guards auto-update poll scheduling (0.21ms) -✔ async safety guards (4.0932ms) -▶ extractErrors - ✔ parses PowerShell error format with column and Line block (27.7198ms) - ✔ parses ParserError without column and uses last pipe message (0.4048ms) - ✔ parses At-line stack traces with plus blocks (0.3832ms) - ✔ parses generic error types like ParameterBindingException (0.3078ms) - ✔ deduplicates signatures and ignores terminating errors without file info (0.3203ms) - ✔ returns empty array for empty input or warning-only logs (0.2276ms) - ✔ fails on raw ANSI logs but succeeds after stripping ANSI codes (0.3349ms) -✔ extractErrors (30.7096ms) -▶ 
extractFallbackContext - ✔ handles empty logs (0.4238ms) - ✔ returns full tail for short logs (0.9058ms) - ✔ extracts tail and error indicators from long logs (0.9484ms) -✔ extractFallbackContext (2.4792ms) -▶ isDevMode + resetDevModeCache - ✔ returns true for AUTOFIX_MODE=dev (0.7016ms) - ✔ returns false for AUTOFIX_MODE=npm (analyze-only) (0.1675ms) - ✔ falls back to repo detection when mode is missing (14.4128ms) - ✔ returns false for explicit analyze-only modes (0.3388ms) - ✔ resets cached value (0.3335ms) -✔ isDevMode + resetDevModeCache (16.2123ms) -[autofix] npm mode — loop fix: analysis only -[autofix] npm mode — loop fix: analysis only -▶ getFixAttemptCount - ✔ increments per signature (29.0278ms) -[autofix] npm mode — loop fix: analysis only -[autofix] npm mode — loop fix: analysis only - ✔ keeps counts isolated per signature (7.3874ms) -✔ getFixAttemptCount (36.6251ms) -▶ bosun SWE-bench bridge - ✔ prints usage when invoked without a command (857.1137ms) - ✔ imports SWE-bench instances into the internal task store (1264.3324ms) -✔ bosun SWE-bench bridge (2122.6478ms) -▶ benchmark mode state - ✔ persists normalized repo-local benchmark mode state (15.356ms) - ✔ matches benchmark tasks by workspace path and generic benchmark metadata (6.9342ms) -✔ benchmark mode state (23.4898ms) -▶ bosun MCP server - ✖ lists the Bosun MCP tool surface over stdio (1434.9594ms) - ✖ supports creating and reading sessions through MCP tools (1395.2434ms) -✖ bosun MCP server (2832.3186ms) -▶ action.bosun_tool - ✔ is registered with correct schema (2.1075ms) - ✔ throws when toolId is missing (2.5052ms) - ✔ returns error output when tool not found (18.5146ms) - ✔ resolves toolId from template variables (3.1487ms) - ✔ stores result in outputVariable when configured (4.4084ms) -[workflow-engine] trigger:fired trigger (trigger.manual) [Start] wf=Test Workflow -[workflow-engine] node:start invoke (action.invoke_workflow) [Invoke Child] wf=Test Workflow -[workflow-engine] 
trigger:fired child-trigger (trigger.manual) [Start] wf=Child -[workflow-engine] node:complete invoke (action.invoke_workflow) [Invoke Child] -[workflow-engine] trigger:fired trigger (trigger.manual) [Start] wf=Test Workflow -[workflow-engine] node:start invoke (action.invoke_workflow) [Invoke] wf=Test Workflow -[workflow-engine] trigger:fired child-trigger (trigger.manual) [Start] wf=Test Workflow -[workflow-engine] node:start child-set-var (action.set_variable) [Set Var] wf=Test Workflow -[workflow-engine] node:complete child-set-var (action.set_variable) [Set Var] -[workflow-engine] node:complete invoke (action.invoke_workflow) [Invoke] -[workflow-engine] node:start log (notify.log) [Log] wf=Test Workflow -[workflow-nodes] Child ran: child-integration-wf -[workflow-engine] node:complete log (notify.log) [Log] -[workflow-engine] trigger:fired trigger (trigger.manual) [Start] wf=Test Workflow -[workflow-engine] node:start git-info (action.bosun_function) [Git Info] wf=Test Workflow -[workflow-engine] node:complete git-info (action.bosun_function) [Git Info] -[workflow-engine] node:start log (notify.log) [Log] wf=Test Workflow -[workflow-nodes] Branch: codex/site-demo-sync -[workflow-engine] node:complete log (notify.log) [Log] -[workflow-engine] trigger:fired trigger (trigger.manual) [Start] wf=Test Workflow -[workflow-engine] node:start fn (action.bosun_function) [Get Branch] wf=Test Workflow -[workflow-engine] node:complete fn (action.bosun_function) [Get Branch] -[workflow-engine] node:start log (notify.log) [Log Branch] wf=Test Workflow -[workflow-nodes] Branch: codex/site-demo-sync, Count: 34 -[workflow-engine] node:complete log (notify.log) [Log Branch] -[workflow-engine] trigger:fired trigger (trigger.manual) [Start] wf=Test Workflow -[workflow-engine] node:start get-branch (action.bosun_function) [Get Branch] wf=Test Workflow -[workflow-engine] node:complete get-branch (action.bosun_function) [Get Branch] -[workflow-engine] node:start invoke-child 
(action.invoke_workflow) [Invoke Child] wf=Test Workflow -[workflow-engine] trigger:fired child-trigger (trigger.manual) [Start] wf=Test Workflow -[workflow-engine] node:start child-action (action.set_variable) [Set Child Data] wf=Test Workflow -[workflow-engine] node:complete child-action (action.set_variable) [Set Child Data] -[workflow-engine] node:complete invoke-child (action.invoke_workflow) [Invoke Child] -[workflow-engine] node:start log (notify.log) [Final Log] wf=Test Workflow -[workflow-nodes] Branch: codex/site-demo-sync, Child: true -[workflow-engine] node:complete log (notify.log) [Final Log] - ✔ can invoke a builtin tool (list-todos) on a real workspace (545.6401ms) - ✔ resolves args with template interpolation (2.9723ms) - ✔ records Bosun tool execution in the execution ledger when engine hook exists (6.4512ms) -✔ action.bosun_tool (586.9636ms) -▶ action.build_task_prompt - ✔ splits user/system prompts and keeps system prompt stable across tasks (7.9687ms) - ✔ falls back to the task ID when the title is the default placeholder (2.5034ms) - ✔ injects workflow continuation guidance from issue advisor into task prompts (2.2112ms) -✔ action.build_task_prompt (12.9097ms) -▶ action.continue_session - ✔ prepends issue-advisor guidance to continuation prompts (0.5429ms) -✔ action.continue_session (0.6256ms) -▶ action.invoke_workflow - ✔ is registered with correct schema (2.8415ms) - ✔ throws when workflowId is empty (1.3957ms) - ✔ throws when engine is not available (1.275ms) - ✔ soft-fails when workflow not found and failOnError is false (default) (3.7343ms) - ✔ throws when workflow not found and failOnError is true (2.809ms) - ✔ sync mode executes child and forwards output (390.567ms) - ✔ dispatch mode returns immediately without waiting (4.344ms) - ✔ dispatch mode accepts synchronous engine return values (2.1863ms) - ✔ handles child workflow failure gracefully (failOnError=false) (2.7051ms) - ✔ throws on child failure when failOnError=true (1.7892ms) - ✔ 
forwards child workflow node outputs to parent (4.8569ms) - ✔ extracts from specific child nodes via extractFromNodes (1.2828ms) - ✔ filters forwarded fields via forwardFields (2.7749ms) - ✔ pipes parent context when pipeContext=true (1.3511ms) - ✔ resolves workflowId from template variables (1.8929ms) - ✔ integrates in a real workflow engine execution (353.8226ms) -✔ action.invoke_workflow (780.4235ms) -▶ action.bosun_function - ✔ is registered with correct schema (1.2409ms) - ✔ throws when function name is missing (0.8165ms) - ✔ throws for unknown function name (0.3952ms) - ✔ calls tools.builtin and returns builtin tool list (0.5065ms) - ✔ calls git.status and returns structured git info (708.4661ms) - ✔ calls git.branch and returns branch info (390.9952ms) - ✔ calls git.log and returns commit list (889.7763ms) - ✔ calls workflows.list with engine (0.6053ms) - ✔ calls config.show and returns config data (0.8279ms) - ✔ handles service unavailability gracefully (0.4218ms) - ✔ resolves function name from template variables (0.2739ms) - ✔ resolves args from template variables (652.1136ms) - ✔ supports extract config for field extraction (2.3357ms) - ✔ supports outputMap for field renaming (436.182ms) - ✔ integrates in a real workflow engine execution (385.5582ms) -✔ action.bosun_function (3471.1751ms) -▶ Bosun native templates - ✔ BOSUN_TOOL_PIPELINE_TEMPLATE has valid structure (0.9549ms) - ✔ WORKFLOW_COMPOSITION_TEMPLATE has valid structure (0.3678ms) - ✔ INLINE_WORKFLOW_COMPOSITION_TEMPLATE has valid structure (0.3203ms) - ✔ MCP_TO_BOSUN_BRIDGE_TEMPLATE has valid structure (0.2975ms) - ✔ GIT_HEALTH_PIPELINE_TEMPLATE has valid structure (0.3075ms) - ✔ all templates are registered in WORKFLOW_TEMPLATES (0.431ms) - ✔ all template nodes reference valid registered node types (0.6572ms) -✔ Bosun native templates (3.5625ms) -▶ cross-node data piping - ✔ pipes data from bosun_function to notify.log via templates (498.979ms) - ✔ chains bosun_function → invoke_workflow → 
notify.log (796.8455ms) -✔ cross-node data piping (1295.9598ms) diff --git a/github/github-app-auth.mjs b/github/github-app-auth.mjs index 31106746e..f10f61033 100644 --- a/github/github-app-auth.mjs +++ b/github/github-app-auth.mjs @@ -1,3 +1,4 @@ +#!/usr/bin/env node /** * github-app-auth.mjs — GitHub App JWT + Installation Token helpers * diff --git a/github/github-auth-manager.mjs b/github/github-auth-manager.mjs index 6e79790af..fa7e7895f 100644 --- a/github/github-auth-manager.mjs +++ b/github/github-auth-manager.mjs @@ -17,33 +17,6 @@ import { getInstallationTokenForRepo, } from "./github-app-auth.mjs"; -// Lazy vault import — avoids circular dependency at startup -let _vaultModule = null; -async function tryVaultGitHubToken() { - try { - if (!_vaultModule) _vaultModule = await import("../lib/vault.mjs"); - const { VaultStore } = _vaultModule; - const { keychainRead } = await import("../lib/vault-keychain.mjs"); - const v = new VaultStore(); - if (!v.isInitialized()) return null; - const key = keychainRead(); - if (!key) return null; - v.open(key); - // Look for a GitHub integration secret - const secrets = v.listSecrets().filter((s) => s.integration === "github"); - for (const s of secrets) { - const full = v.getSecret(s.id); - const t = full.fields?.token || full.fields?.apiKey || ""; - if (t) return t; - } - // Fallback: check vault env for GH_TOKEN - const envToken = v.getEnv("GH_TOKEN") || v.getEnv("GITHUB_TOKEN") || ""; - return envToken || null; - } catch { - return null; - } -} - // ── Constants ───────────────────────────────────────────────────────────────── const BOSUN_AUTH_STATE_PATH = join(homedir(), ".bosun", "github-auth-state.json"); @@ -114,7 +87,6 @@ async function getGhCliToken() { encoding: "utf8", stdio: ["pipe", "pipe", "pipe"], timeout: 5000, - windowsHide: process.platform === "win32", }).trim(); return token || null; } catch { @@ -211,14 +183,6 @@ export async function getGitHubToken(options = {}) { } } - // ── 5. 
Bosun vault fallback ─────────────────────────────────────────────── - if (!isSkipped("vault")) { - const vaultToken = await tryVaultGitHubToken(); - if (vaultToken) { - return { token: vaultToken, type: "vault" }; - } - } - throw new Error( "No GitHub auth available. Set GITHUB_TOKEN, run `gh auth login`, " + "or configure the Bosun GitHub App (BOSUN_GITHUB_APP_ID + BOSUN_GITHUB_PRIVATE_KEY_PATH).", diff --git a/infra/container-runner.mjs b/infra/container-runner.mjs index 340b016b8..afc96215a 100644 --- a/infra/container-runner.mjs +++ b/infra/container-runner.mjs @@ -20,23 +20,6 @@ import { spawn, spawnSync, execSync } from "node:child_process"; import { existsSync, mkdirSync, readFileSync, writeFileSync } from "node:fs"; import { resolve, basename, join } from "node:path"; -function resolveProcessCommand(command) { - const normalizedCommand = String(command || "").trim(); - if (!normalizedCommand || process.platform !== "win32") { - return { command: normalizedCommand, needsShell: false }; - } - - if (/^[A-Za-z]:[\\/]|^[\\/]{2}|[\\/]/.test(normalizedCommand)) { - return { command: normalizedCommand, needsShell: false }; - } - - if (/\.(?:cmd|bat|exe|com)$/i.test(normalizedCommand)) { - return { command: normalizedCommand, needsShell: false }; - } - - return { command: `${normalizedCommand}.cmd`, needsShell: true }; -} - // ── Configuration ──────────────────────────────────────────────────────────── const containerEnabled = ["1", "true", "yes"].includes( @@ -344,17 +327,15 @@ async function runIsolatedProcess(options = {}) { } = options; return new Promise((resolvePromise) => { - const { command: resolvedCommand, needsShell } = resolveProcessCommand(command); const useArgv = Array.isArray(args) && args.length > 0; const proc = useArgv - ? spawn(String(resolvedCommand || ""), args.map((arg) => String(arg)), { + ? 
spawn(String(command || ""), args.map((arg) => String(arg)), { cwd, env: { ...process.env, ...env }, stdio: ["ignore", "pipe", "pipe"], windowsHide: true, - shell: needsShell, }) - : spawn(String(resolvedCommand || ""), { + : spawn(String(command || ""), { cwd, env: { ...process.env, ...env }, stdio: ["ignore", "pipe", "pipe"], diff --git a/infra/guardrails.mjs b/infra/guardrails.mjs deleted file mode 100644 index 05cdb793a..000000000 --- a/infra/guardrails.mjs +++ /dev/null @@ -1,314 +0,0 @@ -import { existsSync, mkdirSync, readFileSync, writeFileSync } from "node:fs"; -import { resolve } from "node:path"; - -export const DEFAULT_INPUT_POLICY = Object.freeze({ - enabled: true, - warnThreshold: 60, - blockThreshold: 35, - minTitleLength: 8, - minDescriptionLength: 24, - minContextFields: 1, - minCombinedTokens: 10, -}); - -export const DEFAULT_PUSH_POLICY = Object.freeze({ - workflowOnly: true, - blockAgentPushes: true, - requireManagedPrePush: true, -}); - -const GENERIC_TEXT_PATTERNS = [ - /\b(?:asdf|placeholder|tbd|todo|unknown|misc|thing|stuff|whatever)\b/i, - /^(?:fix|test|tmp|wip|na|n\/a|none|help)$/i, - /^\W+$/, -]; - -function parseBooleanLike(value, fallback) { - if (value === undefined || value === null || value === "") return fallback; - const normalized = String(value).trim().toLowerCase(); - if (["1", "true", "yes", "on"].includes(normalized)) return true; - if (["0", "false", "no", "off"].includes(normalized)) return false; - return fallback; -} - -function clampNumber(value, min, max, fallback) { - const numeric = Number(value); - if (!Number.isFinite(numeric)) return fallback; - return Math.min(max, Math.max(min, numeric)); -} - -function normalizeText(value) { - return String(value || "").replace(/\s+/g, " ").trim(); -} - -function collectTextValues(value, bucket = []) { - if (value == null) return bucket; - if (typeof value === "string" || typeof value === "number" || typeof value === "boolean") { - const normalized = normalizeText(value); - if 
(normalized) bucket.push(normalized); - return bucket; - } - if (Array.isArray(value)) { - for (const entry of value) collectTextValues(entry, bucket); - return bucket; - } - if (typeof value === "object") { - for (const [key, entry] of Object.entries(value)) { - if (["guardrailsOverride", "overrideGuardrails", "INPUTOverride"].includes(key)) continue; - collectTextValues(entry, bucket); - } - } - return bucket; -} - -function tokenize(text) { - return normalizeText(text) - .toLowerCase() - .split(/[^a-z0-9]+/i) - .filter(Boolean); -} - -function addFinding(findings, id, penalty, message) { - findings.push({ - id, - penalty, - message, - severity: penalty >= 25 ? "high" : penalty >= 15 ? "medium" : "low", - }); -} - -function readPolicyFile(policyPath) { - if (!existsSync(policyPath)) return {}; - try { - return JSON.parse(readFileSync(policyPath, "utf8")); - } catch { - return {}; - } -} - -function normalizeScriptEntries(scripts, matcher) { - return Object.entries(scripts) - .filter(([name]) => matcher.test(String(name || ""))) - .map(([name, command]) => ({ name, command: String(command || "") })); -} - -export function normalizeINPUTPolicy(raw = {}) { - return { - enabled: parseBooleanLike(raw?.enabled, DEFAULT_INPUT_POLICY.enabled), - warnThreshold: clampNumber(raw?.warnThreshold, 1, 100, DEFAULT_INPUT_POLICY.warnThreshold), - blockThreshold: clampNumber(raw?.blockThreshold, 0, 100, DEFAULT_INPUT_POLICY.blockThreshold), - minTitleLength: clampNumber(raw?.minTitleLength, 0, 200, DEFAULT_INPUT_POLICY.minTitleLength), - minDescriptionLength: clampNumber(raw?.minDescriptionLength, 0, 2000, DEFAULT_INPUT_POLICY.minDescriptionLength), - minContextFields: clampNumber(raw?.minContextFields, 0, 50, DEFAULT_INPUT_POLICY.minContextFields), - minCombinedTokens: clampNumber(raw?.minCombinedTokens, 0, 200, DEFAULT_INPUT_POLICY.minCombinedTokens), - }; -} - -export function normalizePushPolicy(raw = {}) { - return { - workflowOnly: parseBooleanLike(raw?.workflowOnly, 
DEFAULT_PUSH_POLICY.workflowOnly), - blockAgentPushes: parseBooleanLike(raw?.blockAgentPushes, DEFAULT_PUSH_POLICY.blockAgentPushes), - requireManagedPrePush: parseBooleanLike(raw?.requireManagedPrePush, DEFAULT_PUSH_POLICY.requireManagedPrePush), - }; -} - -export function normalizeGuardrailsPolicy(raw = {}) { - const source = raw && typeof raw === "object" ? raw : {}; - return { - INPUT: normalizeINPUTPolicy(source?.INPUT && typeof source.INPUT === "object" ? source.INPUT : {}), - push: normalizePushPolicy(source?.push && typeof source.push === "object" ? source.push : {}), - }; -} - -export function getGuardrailsPolicyPath(rootDir) { - return resolve(rootDir, ".bosun", "guardrails.json"); -} - -export function loadGuardrailsPolicy(rootDir) { - const policyPath = getGuardrailsPolicyPath(rootDir); - return normalizeGuardrailsPolicy(readPolicyFile(policyPath)); -} - -export function saveGuardrailsPolicy(rootDir, raw = {}) { - const normalized = normalizeGuardrailsPolicy(raw); - const policyPath = getGuardrailsPolicyPath(rootDir); - mkdirSync(resolve(rootDir, ".bosun"), { recursive: true }); - writeFileSync(policyPath, JSON.stringify(normalized, null, 2) + "\n", "utf8"); - return normalized; -} - -export function ensureGuardrailsPolicy(rootDir) { - const policyPath = getGuardrailsPolicyPath(rootDir); - if (!existsSync(policyPath)) { - return saveGuardrailsPolicy(rootDir, { INPUT: DEFAULT_INPUT_POLICY, push: DEFAULT_PUSH_POLICY }); - } - const normalized = loadGuardrailsPolicy(rootDir); - writeFileSync(policyPath, JSON.stringify(normalized, null, 2) + "\n", "utf8"); - return normalized; -} - -export function shouldBlockAgentPushes(rootDir) { - return loadGuardrailsPolicy(rootDir).push.blockAgentPushes !== false; -} - -export function shouldRequireManagedPrePush(rootDir) { - return loadGuardrailsPolicy(rootDir).push.requireManagedPrePush !== false; -} - -export function detectRepoGuardrails(rootDir) { - const packageJsonPath = resolve(rootDir, "package.json"); - let 
packageJson = null; - try { - packageJson = JSON.parse(readFileSync(packageJsonPath, "utf8")); - } catch { - packageJson = null; - } - - const scripts = packageJson?.scripts && typeof packageJson.scripts === "object" - ? packageJson.scripts - : {}; - const prepushScripts = normalizeScriptEntries(scripts, /^(?:prepush(?::|$)|pre-push$|check:prepush$)/i); - const prepublishScripts = normalizeScriptEntries(scripts, /^(?:prepublish(?:only)?(?::|$)|pre-publish$)/i); - const ciScripts = normalizeScriptEntries( - scripts, - /^(?:ci(?::|$)|test(?::|$)|build(?::|$)|lint(?::|$)|check(?::|$)|verify(?::|$)|release(?::|$))/i, - ).filter((entry) => !prepushScripts.some((candidate) => candidate.name === entry.name)); - - const categories = { - prepush: { - detected: prepushScripts.length > 0, - enforced: prepushScripts.length > 0, - scripts: prepushScripts, - }, - prepublish: { - detected: prepublishScripts.length > 0, - enforced: prepublishScripts.length > 0, - scripts: prepublishScripts, - }, - ci: { - detected: ciScripts.length > 0, - enforced: ciScripts.length > 0, - scripts: ciScripts, - }, - }; - - return { - rootDir, - packageJsonPath, - hasPackageJson: packageJson != null, - packageName: typeof packageJson?.name === "string" ? 
packageJson.name : "", - categories, - detectedCount: Object.values(categories).filter((entry) => entry.detected).length, - }; -} - -export function assessInputQuality(input = {}, policy = DEFAULT_INPUT_POLICY) { - const normalizedPolicy = normalizeINPUTPolicy(policy); - const title = normalizeText(input?.title); - const description = normalizeText(input?.description); - const metadataValues = collectTextValues(input?.metadata || {}); - const formValues = collectTextValues(input?.formValues || {}); - const contextValues = [...metadataValues, ...formValues]; - const combinedText = [title, description, ...contextValues].filter(Boolean).join(" "); - const tokens = tokenize(combinedText); - const uniqueTokens = new Set(tokens); - const uniqueTokenRatio = tokens.length > 0 ? uniqueTokens.size / tokens.length : 0; - const genericHits = [title, description, ...contextValues].filter(Boolean).filter((value) => - GENERIC_TEXT_PATTERNS.some((pattern) => pattern.test(value)), - ); - - const findings = []; - let score = 100; - - if (normalizedPolicy.enabled !== true) { - return { - policy: normalizedPolicy, - score, - status: "disabled", - blocked: false, - summary: "INPUT guardrails are disabled.", - findings, - metrics: { - titleLength: title.length, - descriptionLength: description.length, - contextFieldCount: contextValues.length, - tokenCount: tokens.length, - uniqueTokenRatio, - }, - }; - } - - if (!title) { - score -= 45; - addFinding(findings, "missing-title", 45, "A clear title is required."); - } else if (title.length < normalizedPolicy.minTitleLength) { - const penalty = 30; - score -= penalty; - addFinding(findings, "short-title", penalty, `Title should be at least ${normalizedPolicy.minTitleLength} characters.`); - } - - if (!description) { - score -= 35; - addFinding(findings, "missing-description", 35, "Add a description with enough implementation context."); - } else if (description.length < normalizedPolicy.minDescriptionLength) { - const penalty = 15; - score 
-= penalty; - addFinding(findings, "thin-description", penalty, `Description should be at least ${normalizedPolicy.minDescriptionLength} characters.`); - } - - if (contextValues.length < normalizedPolicy.minContextFields) { - const penalty = 15; - score -= penalty; - addFinding(findings, "missing-context", penalty, `Provide at least ${normalizedPolicy.minContextFields} populated context field(s).`); - } - - if (tokens.length < normalizedPolicy.minCombinedTokens) { - const penalty = 20; - score -= penalty; - addFinding(findings, "low-signal", penalty, `Input should contain at least ${normalizedPolicy.minCombinedTokens} meaningful tokens.`); - } - - if (uniqueTokenRatio > 0 && uniqueTokenRatio < 0.45) { - const penalty = 10; - score -= penalty; - addFinding(findings, "repetitive-input", penalty, "Input is too repetitive to be reliable."); - } - - if (title && description && title.toLowerCase() === description.toLowerCase()) { - const penalty = 10; - score -= penalty; - addFinding(findings, "duplicated-summary", penalty, "Title and description should not repeat the same text."); - } - - if (genericHits.length > 0) { - const penalty = Math.min(30, genericHits.length * 10); - score -= penalty; - addFinding(findings, "generic-language", penalty, "Replace placeholder or generic text with concrete intent."); - } - - score = Math.max(0, Math.min(100, score)); - const status = score < normalizedPolicy.blockThreshold - ? "block" - : score < normalizedPolicy.warnThreshold - ? "warn" - : "pass"; - - return { - policy: normalizedPolicy, - score, - status, - blocked: status === "block", - summary: - findings[0]?.message || - (status === "pass" ? "Input quality passed INPUT guardrails." 
: "Input quality needs more detail."), - findings, - metrics: { - titleLength: title.length, - descriptionLength: description.length, - contextFieldCount: contextValues.length, - tokenCount: tokens.length, - uniqueTokenRatio: Number(uniqueTokenRatio.toFixed(3)), - genericHitCount: genericHits.length, - }, - }; -} diff --git a/infra/heartbeat-monitor.mjs b/infra/heartbeat-monitor.mjs deleted file mode 100644 index 3bcaf5613..000000000 --- a/infra/heartbeat-monitor.mjs +++ /dev/null @@ -1,432 +0,0 @@ -import { request as httpRequest } from "node:http"; -import { existsSync, mkdirSync, readFileSync, appendFileSync } from "node:fs"; -import { open } from "node:fs/promises"; -import { request as httpsRequest } from "node:https"; -import { monitorEventLoopDelay } from "node:perf_hooks"; -import { dirname, resolve } from "node:path"; - -const DEFAULT_INTERVAL_MS = 30_000; -const DEFAULT_TIMEOUT_MS = 5_000; -const DEFAULT_SUCCESS_LOG_INTERVAL_MS = 10 * 60_000; -const DEFAULT_EVENT_LOOP_WARN_MS = 1_000; -const DEFAULT_TAIL_LINES = 40; -const DEFAULT_TAIL_BYTES = 16 * 1024; -const HEARTBEAT_LOG_FILE = "heartbeat-monitor.log"; -const UI_LAST_PORT_FILE = "ui-last-port.json"; - -function clampPositiveNumber(value, fallback, min = 1) { - const numeric = Number(value); - if (!Number.isFinite(numeric) || numeric < min) return fallback; - return Math.trunc(numeric); -} - -function summarizeEventLoopDelay(histogram) { - if (!histogram) { - return { meanMs: 0, maxMs: 0, p99Ms: 0 }; - } - return { - meanMs: Number((histogram.mean / 1e6).toFixed(3)) || 0, - maxMs: Number((histogram.max / 1e6).toFixed(3)) || 0, - p99Ms: Number((histogram.percentile(99) / 1e6).toFixed(3)) || 0, - }; -} - -function isAbortTimeoutError(error) { - const name = String(error?.name || "").trim().toLowerCase(); - const message = String(error?.message || "").trim().toLowerCase(); - return name === "aborterror" || message.includes("aborted"); -} - -function resolveHeartbeatLogPath(logDir) { - return 
resolve(String(logDir || process.cwd()), HEARTBEAT_LOG_FILE); -} - -function resolveUiLastPortPath(configDir) { - return resolve(String(configDir || process.cwd()), ".cache", UI_LAST_PORT_FILE); -} - -function normalizeUiProbeHost(host) { - const normalized = String(host || "").trim().toLowerCase(); - if ( - !normalized || - normalized === "0.0.0.0" || - normalized === "::" || - normalized === "[::]" || - normalized === "::0" - ) { - return "127.0.0.1"; - } - return normalized === "localhost" ? "127.0.0.1" : normalized; -} - -function buildProbeUrl({ protocol = "http", host = "127.0.0.1", port = 0, healthPath = "/healthz" } = {}) { - const safeProtocol = String(protocol || "").trim().toLowerCase() === "https" ? "https" : "http"; - const safeHost = normalizeUiProbeHost(host); - const safePort = Number(port); - if (!Number.isFinite(safePort) || safePort <= 0 || safePort > 65535) return null; - const safeHealthPath = String(healthPath || "/healthz").startsWith("/") - ? String(healthPath || "/healthz") - : `/${String(healthPath || "healthz")}`; - return `${safeProtocol}://${safeHost}:${Math.trunc(safePort)}${safeHealthPath}`; -} - -function readUiProbeTarget(configDir, fallbackHost = "127.0.0.1", healthPath = "/healthz") { - try { - const portPath = resolveUiLastPortPath(configDir); - if (!existsSync(portPath)) { - return { - port: 0, - host: normalizeUiProbeHost(fallbackHost), - protocol: "http", - url: null, - }; - } - const payload = JSON.parse(readFileSync(portPath, "utf8")); - const port = Number(payload?.port || 0); - if (!Number.isFinite(port) || port <= 0 || port > 65535) { - return { - port: 0, - host: normalizeUiProbeHost(fallbackHost), - protocol: "http", - url: null, - }; - } - let protocol = String(payload?.protocol || "").trim().toLowerCase() === "https" ? 
"https" : "http"; - let host = normalizeUiProbeHost(payload?.host || fallbackHost); - const rawUrl = String(payload?.url || "").trim(); - if (rawUrl) { - try { - const parsed = new URL(rawUrl); - if (parsed.port) { - const parsedPort = Number(parsed.port); - if (Number.isFinite(parsedPort) && parsedPort > 0 && parsedPort <= 65535) { - host = normalizeUiProbeHost(parsed.hostname || host); - protocol = parsed.protocol === "https:" ? "https" : "http"; - } - } - } catch { - // Fall back to discrete metadata fields. - } - } - return { - port: Math.trunc(port), - host, - protocol, - url: buildProbeUrl({ protocol, host, port, healthPath }), - }; - } catch { - return { - port: 0, - host: normalizeUiProbeHost(fallbackHost), - protocol: "http", - url: null, - }; - } -} - -async function probeWithNodeRequest(target, timeoutMs) { - const requestImpl = target.protocol === "https" ? httpsRequest : httpRequest; - return await new Promise((resolveProbe, rejectProbe) => { - const req = requestImpl( - { - protocol: target.protocol === "https" ? "https:" : "http:", - host: target.host, - port: target.port, - path: target.path, - method: "GET", - timeout: timeoutMs, - headers: { accept: "application/json" }, - }, - (res) => { - const chunks = []; - res.on("data", (chunk) => chunks.push(chunk)); - res.on("end", () => { - const text = Buffer.concat(chunks).toString("utf8"); - let payload = null; - try { - payload = text ? JSON.parse(text) : null; - } catch { - payload = text ? 
{ raw: text } : null; - } - resolveProbe({ - ok: Number(res.statusCode || 0) >= 200 && Number(res.statusCode || 0) < 300, - status: Number(res.statusCode || 0), - text: async () => text, - payload, - }); - }); - }, - ); - req.on("error", rejectProbe); - req.on("timeout", () => { - const error = new Error("aborted"); - error.name = "AbortError"; - try { req.destroy(error); } catch { /* best effort */ } - }); - req.end(); - }); -} - -async function readTail(filePath, { maxLines = DEFAULT_TAIL_LINES, maxBytes = DEFAULT_TAIL_BYTES } = {}) { - try { - if (!existsSync(filePath)) return ""; - const handle = await open(filePath, "r"); - try { - const info = await handle.stat(); - const size = Number(info?.size || 0); - if (!Number.isFinite(size) || size <= 0) return ""; - const length = Math.max(1, Math.min(size, clampPositiveNumber(maxBytes, DEFAULT_TAIL_BYTES))); - const offset = Math.max(0, size - length); - const buffer = Buffer.alloc(length); - await handle.read(buffer, 0, length, offset); - let text = buffer.toString("utf8"); - if (offset > 0) { - const firstNewline = text.indexOf("\n"); - if (firstNewline >= 0) text = text.slice(firstNewline + 1); - } - const lines = text.split(/\r?\n/).filter(Boolean); - return lines.slice(-Math.max(1, clampPositiveNumber(maxLines, DEFAULT_TAIL_LINES))).join("\n"); - } finally { - await handle.close().catch(() => {}); - } - } catch { - return ""; - } -} - -async function readCorrelatedLogContext(logDir, options = {}) { - const monitorLogTail = await readTail(resolve(String(logDir || process.cwd()), "monitor.log"), options); - const monitorErrorLogTail = await readTail(resolve(String(logDir || process.cwd()), "monitor-error.log"), options); - return { - monitorLogTail, - monitorErrorLogTail, - }; -} - -export function createHeartbeatMonitor(options = {}) { - const configDir = String(options.configDir || process.cwd()); - const logDir = String(options.logDir || process.cwd()); - const fetchImpl = typeof options.fetchImpl === 
"function" ? options.fetchImpl : null; - const logger = options.logger && typeof options.logger === "object" ? options.logger : console; - const host = String(options.host || "127.0.0.1"); - const healthPath = String(options.healthPath || "/healthz"); - const intervalMs = clampPositiveNumber(options.intervalMs, DEFAULT_INTERVAL_MS, 1000); - const timeoutMs = clampPositiveNumber(options.timeoutMs, DEFAULT_TIMEOUT_MS, 100); - const successLogIntervalMs = clampPositiveNumber( - options.successLogIntervalMs, - DEFAULT_SUCCESS_LOG_INTERVAL_MS, - 1000, - ); - const eventLoopWarnMs = clampPositiveNumber( - options.eventLoopWarnMs, - DEFAULT_EVENT_LOOP_WARN_MS, - 1, - ); - const correlationOptions = { - maxLines: clampPositiveNumber(options.correlationMaxLines, DEFAULT_TAIL_LINES, 1), - maxBytes: clampPositiveNumber(options.correlationMaxBytes, DEFAULT_TAIL_BYTES, 1024), - }; - - let timer = null; - let running = false; - let inflight = null; - let lastOutcome = ""; - let lastSuccessLogAt = 0; - const histogram = monitorEventLoopDelay({ resolution: 20 }); - const state = { - lastProbeAt: 0, - lastSuccessAt: 0, - lastFailureAt: 0, - lastOutcome: "not_started", - lastError: "", - }; - - const logPath = resolveHeartbeatLogPath(logDir); - - function appendLog(entry) { - try { - mkdirSync(dirname(logPath), { recursive: true }); - appendFileSync(logPath, `${JSON.stringify(entry)}\n`, "utf8"); - } catch { - // best effort - } - } - - async function probeNow(trigger = "interval") { - if (inflight) return inflight; - inflight = (async () => { - const startedAt = Date.now(); - const eventLoop = summarizeEventLoopDelay(histogram); - histogram.reset(); - const target = readUiProbeTarget(configDir, options.host || host, healthPath); - const port = Number(target?.port || 0); - const baseEntry = { - ts: new Date(startedAt).toISOString(), - trigger, - port, - url: target?.url || null, - timeoutMs, - eventLoop, - }; - - if (!port) { - state.lastProbeAt = startedAt; - state.lastFailureAt = 
startedAt; - state.lastOutcome = "port_missing"; - state.lastError = "ui_last_port_missing"; - const outcome = "port_missing"; - if (outcome !== lastOutcome) { - appendLog({ - ...baseEntry, - level: "warn", - outcome, - error: "No persisted UI port found for heartbeat probe", - }); - lastOutcome = outcome; - } - return { ok: false, outcome }; - } - - const controller = new AbortController(); - const timeout = setTimeout(() => controller.abort(), timeoutMs); - if (typeof timeout.unref === "function") timeout.unref(); - - let outcome = "ok"; - let level = "info"; - let statusCode = 0; - let payload = null; - let errorMessage = ""; - try { - const response = typeof fetchImpl === "function" - ? await fetchImpl(target.url, { - signal: controller.signal, - headers: { accept: "application/json" }, - }) - : await probeWithNodeRequest( - { - protocol: target.protocol, - host: target.host, - port: target.port, - path: String(healthPath || "/healthz").startsWith("/") - ? String(healthPath || "/healthz") - : `/${String(healthPath || "healthz")}`, - }, - timeoutMs, - ); - statusCode = Number(response?.status || 0); - if (response?.payload !== undefined) { - payload = response.payload; - } else { - const text = await response.text(); - try { - payload = text ? JSON.parse(text) : null; - } catch { - payload = text ? { raw: text } : null; - } - } - if (!response.ok) { - outcome = "http_error"; - level = "warn"; - } else if (String(payload?.status || "").trim().toLowerCase() === "degraded") { - outcome = "degraded"; - level = "warn"; - } - } catch (error) { - errorMessage = String(error?.message || error || ""); - outcome = isAbortTimeoutError(error) ? "timeout" : "fetch_error"; - level = "error"; - } finally { - clearTimeout(timeout); - } - - const durationMs = Date.now() - startedAt; - const lagWarning = eventLoop.maxMs >= eventLoopWarnMs || eventLoop.p99Ms >= eventLoopWarnMs; - const needsCorrelation = outcome !== "ok" || lagWarning; - const correlatedLogs = needsCorrelation - ? 
await readCorrelatedLogContext(logDir, correlationOptions) - : null; - const recovered = outcome === "ok" && lastOutcome && lastOutcome !== "ok"; - const shouldLogSuccess = - outcome === "ok" && - (recovered || startedAt - lastSuccessLogAt >= successLogIntervalMs || lagWarning); - const shouldLog = outcome !== "ok" || shouldLogSuccess || outcome !== lastOutcome; - - state.lastProbeAt = startedAt; - state.lastOutcome = outcome; - state.lastError = errorMessage; - if (outcome === "ok") { - state.lastSuccessAt = startedAt; - lastSuccessLogAt = startedAt; - } else { - state.lastFailureAt = startedAt; - } - - if (shouldLog) { - appendLog({ - ...baseEntry, - durationMs, - level, - outcome: recovered ? "recovered" : outcome, - previousOutcome: lastOutcome || null, - statusCode: statusCode || null, - payload, - error: errorMessage || null, - lagWarning, - correlatedLogs, - }); - } - lastOutcome = outcome; - - if (outcome !== "ok" && typeof logger?.warn === "function") { - logger.warn( - `[heartbeat] ${outcome} ${baseEntry.url || "(missing url)"} after ${durationMs}ms`, - ); - } - - return { - ok: outcome === "ok", - outcome, - durationMs, - statusCode, - payload, - error: errorMessage || null, - lagWarning, - }; - })().finally(() => { - inflight = null; - }); - - return inflight; - } - - return { - start() { - if (running) return; - running = true; - histogram.enable(); - timer = setInterval(() => { - void probeNow("interval"); - }, intervalMs); - if (typeof timer.unref === "function") timer.unref(); - void probeNow("startup"); - }, - stop() { - running = false; - if (timer) { - clearInterval(timer); - timer = null; - } - histogram.disable(); - }, - probeNow, - getSnapshot() { - return { - ...state, - running, - intervalMs, - timeoutMs, - logPath, - }; - }, - }; -} diff --git a/infra/library-manager.mjs b/infra/library-manager.mjs index 7e40c19df..42afabcbd 100644 --- a/infra/library-manager.mjs +++ b/infra/library-manager.mjs @@ -313,21 +313,6 @@ export function 
getBosunHomeDir() { return resolve(homedir(), ".bosun"); } -const UNRESOLVED_TEMPLATE_TOKEN_RE = /\{\{[^{}]+\}\}/; - -export function hasUnresolvedTemplateTokens(value) { - return UNRESOLVED_TEMPLATE_TOKEN_RE.test(String(value || "")); -} - -function resolveLibraryRootDir(rootDir) { - const raw = String(rootDir || "").trim(); - if (!raw) return getBosunHomeDir(); - if (hasUnresolvedTemplateTokens(raw)) { - throw new Error(`Invalid library root path \"${raw}\": unresolved template token detected`); - } - return resolve(raw); -} - function ensureDir(dir) { mkdirSync(dir, { recursive: true }); return dir; @@ -587,7 +572,7 @@ function buildSkillIndexRevision(manifest) { } function updateIndexCache(cache, rootDir, index, manifestMtimeMs = 0) { - const cacheKey = resolveLibraryRootDir(rootDir); + const cacheKey = resolve(rootDir || getBosunHomeDir()); const payload = { ...index, count: Array.isArray(index?.profiles) @@ -1027,19 +1012,19 @@ function resolveToolSelection(rootDir, best) { } export function getLibraryIndexDir(rootDir) { - return resolve(resolveLibraryRootDir(rootDir), LIBRARY_INDEX_DIR); + return resolve(rootDir || getBosunHomeDir(), LIBRARY_INDEX_DIR); } export function getAgentProfileIndexPath(rootDir) { - return resolve(resolveLibraryRootDir(rootDir), LIBRARY_INDEX_DIR, AGENT_PROFILE_INDEX); + return resolve(rootDir || getBosunHomeDir(), LIBRARY_INDEX_DIR, AGENT_PROFILE_INDEX); } export function getSkillEntryIndexPath(rootDir) { - return resolve(resolveLibraryRootDir(rootDir), LIBRARY_INDEX_DIR, SKILL_ENTRY_INDEX); + return resolve(rootDir || getBosunHomeDir(), LIBRARY_INDEX_DIR, SKILL_ENTRY_INDEX); } export function rebuildAgentProfileIndex(rootDir, manifest = loadManifest(rootDir)) { - const normalizedRoot = resolveLibraryRootDir(rootDir); + const normalizedRoot = resolve(rootDir || getBosunHomeDir()); const profiles = (manifest?.entries || []) .filter((entry) => entry?.type === "agent") .map((entry) => buildIndexedAgentProfile(normalizedRoot, entry)) 
@@ -1056,7 +1041,7 @@ export function rebuildAgentProfileIndex(rootDir, manifest = loadManifest(rootDi } export function loadAgentProfileIndex(rootDir, options = {}) { - const normalizedRoot = resolveLibraryRootDir(rootDir); + const normalizedRoot = resolve(rootDir || getBosunHomeDir()); const manifestPath = getManifestPath(normalizedRoot); const manifestMtimeMs = getFileMtimeMs(manifestPath); const cacheEntry = agentProfileIndexCache.get(normalizedRoot); @@ -1097,7 +1082,7 @@ export function listIndexedAgentProfiles(rootDir, options = {}) { } export function rebuildSkillEntryIndex(rootDir, manifest = loadManifest(rootDir)) { - const normalizedRoot = resolveLibraryRootDir(rootDir); + const normalizedRoot = resolve(rootDir || getBosunHomeDir()); const skills = (manifest?.entries || []) .filter((entry) => entry?.type === "skill") .map((entry) => buildIndexedSkillEntry(entry)); @@ -1127,7 +1112,7 @@ export function rebuildSkillEntryIndex(rootDir, manifest = loadManifest(rootDir) } export function loadSkillEntryIndex(rootDir, options = {}) { - const normalizedRoot = resolveLibraryRootDir(rootDir); + const normalizedRoot = resolve(rootDir || getBosunHomeDir()); const manifestPath = getManifestPath(normalizedRoot); const manifestMtimeMs = getFileMtimeMs(manifestPath); const cacheEntry = skillEntryIndexCache.get(normalizedRoot); @@ -1257,7 +1242,7 @@ export function resolveLibraryPlan(rootDir, criteria = {}, opts = {}) { * Get the manifest path for a workspace (or global). 
*/ export function getManifestPath(rootDir) { - return resolve(resolveLibraryRootDir(rootDir), ".bosun", LIBRARY_MANIFEST); + return resolve(rootDir || getBosunHomeDir(), ".bosun", LIBRARY_MANIFEST); } /** @@ -1285,7 +1270,7 @@ export function saveManifest(rootDir, manifest) { // ── CRUD operations ────────────────────────────────────────────────────────── function dirForType(rootDir, type) { - const root = resolveLibraryRootDir(rootDir); + const root = rootDir || getBosunHomeDir(); switch (type) { case "prompt": return resolve(root, PROMPT_DIR); case "skill": return resolve(root, SKILL_DIR); @@ -2170,7 +2155,7 @@ function discoverMcpServersFromCodexConfig(rootDir) { * them into the library manifest/filesystem. */ export function syncAutoDiscoveredLibraryEntries(rootDir) { - const root = resolveLibraryRootDir(rootDir); + const root = rootDir || getBosunHomeDir(); const manifestSnapshot = loadManifest(root); const existingEntries = Array.isArray(manifestSnapshot?.entries) ? manifestSnapshot.entries diff --git a/infra/maintenance.mjs b/infra/maintenance.mjs index 572b4390a..2ee2e01d1 100644 --- a/infra/maintenance.mjs +++ b/infra/maintenance.mjs @@ -476,19 +476,12 @@ export function cleanupStaleBranches(repoRoot, opts = {}) { // 3. 
List all local branches let localBranches; try { - let r = spawnSync( + const r = spawnSync( "git", - ["for-each-ref", "refs/heads/", "--format=%(refname:short)"], + ["for-each-ref", "--format=%(refname:short)", "refs/heads/"], { cwd: repoRoot, encoding: "utf8", timeout: 10000, windowsHide: true }, ); - if (!r || typeof r.status === "undefined") { - r = spawnSync( - "git", - ["for-each-ref"], - { cwd: repoRoot, encoding: "utf8", timeout: 10000, windowsHide: true }, - ); - } - if (!r || r.status !== 0 || !r.stdout) return result; + if (r.status !== 0 || !r.stdout) return result; localBranches = r.stdout.trim().split("\n").filter(Boolean); } catch (e) { result.errors.push(`Failed to list branches: ${e.message}`); @@ -549,7 +542,7 @@ export function cleanupStaleBranches(repoRoot, opts = {}) { const remoteExists = spawnSync( "git", ["rev-parse", "--verify", `refs/remotes/${remoteRef}`], - { cwd: repoRoot, encoding: "utf8", timeout: 5000, windowsHide: true }, + { cwd: repoRoot, timeout: 5000, windowsHide: true }, ); if (remoteExists.status === 0) { @@ -1052,7 +1045,7 @@ export function syncLocalTrackingBranches(repoRoot, branches) { const refCheck = spawnSync( "git", ["rev-parse", "--verify", `refs/heads/${branch}`], - { cwd: repoRoot, encoding: "utf8", timeout: 5000, windowsHide: true }, + { cwd: repoRoot, timeout: 5000, windowsHide: true }, ); if (refCheck.status !== 0) { // Local branch doesn't exist — nothing to sync @@ -1064,7 +1057,7 @@ export function syncLocalTrackingBranches(repoRoot, branches) { const remoteCheck = spawnSync( "git", ["rev-parse", "--verify", `refs/remotes/${remoteRef}`], - { cwd: repoRoot, encoding: "utf8", timeout: 5000, windowsHide: true }, + { cwd: repoRoot, timeout: 5000, windowsHide: true }, ); if (remoteCheck.status !== 0) continue; @@ -1114,7 +1107,7 @@ export function syncLocalTrackingBranches(repoRoot, branches) { const update = spawnSync( "git", ["update-ref", `refs/heads/${branch}`, `refs/remotes/${remoteRef}`], - { cwd: repoRoot, 
encoding: "utf8", timeout: 5000, windowsHide: true }, + { cwd: repoRoot, timeout: 5000, windowsHide: true }, ); if (update.status === 0) { logThrottledBranchSync( @@ -1265,7 +1258,7 @@ export function syncLocalTrackingBranches(repoRoot, branches) { const update = spawnSync( "git", ["update-ref", `refs/heads/${branch}`, `refs/remotes/${remoteRef}`], - { cwd: repoRoot, encoding: "utf8", timeout: 5000, windowsHide: true }, + { cwd: repoRoot, timeout: 5000, windowsHide: true }, ); if (update.status === 0) { logThrottledBranchSync( @@ -1415,6 +1408,3 @@ export async function runMaintenanceSweep(opts = {}) { return result; } - - - diff --git a/infra/monitor.mjs b/infra/monitor.mjs index 5c7f672b5..29ccd77fc 100644 --- a/infra/monitor.mjs +++ b/infra/monitor.mjs @@ -1,4 +1,4 @@ -import { execSync, spawn, spawnSync, exec } from "node:child_process"; +import { execSync, spawn, spawnSync } from "node:child_process"; import { createHash, randomUUID } from "node:crypto"; import { existsSync, @@ -35,33 +35,6 @@ if (typeof net.setDefaultAutoSelectFamilyAttemptTimeout === "function") { net.setDefaultAutoSelectFamilyAttemptTimeout(2000); } -const heartbeatRuntimeState = { - current: null, -}; - -/** - * Non-blocking async shell exec — avoids blocking the HTTP server event loop. - * Use instead of execSync/spawnSync in timer callbacks and request handlers. 
- * @param {string} cmd - Shell command string - * @param {{ cwd?: string, timeout?: number, encoding?: string }} [opts] - * @returns {Promise} stdout on success, rejects with Error on non-zero exit - */ -function execAsync(cmd, { cwd, timeout = 30_000, encoding = "utf8" } = {}) { - return new Promise((resolve, reject) => { - const child = exec(cmd, { cwd, timeout, encoding, windowsHide: true }); - let stdout = ""; - let stderr = ""; - child.stdout?.on("data", (d) => { stdout += d; }); - child.stderr?.on("data", (d) => { stderr += d; }); - child.on("close", (code) => { - if (code === 0 || code === null) resolve(stdout); - else reject(Object.assign(new Error(`exec failed (${code}): ${cmd}`), { stdout, stderr, exitCode: code })); - }); - child.on("error", reject); - }); -} - - import { acquireMonitorLock } from "./maintenance.mjs"; import { @@ -112,7 +85,6 @@ import { import { loadConfig } from "../config/config.mjs"; import { formatPreflightReport, runPreflightChecks } from "./preflight.mjs"; import { startAutoUpdateLoop, stopAutoUpdateLoop } from "./update-check.mjs"; -import { createHeartbeatMonitor } from "./heartbeat-monitor.mjs"; import { isWhatsAppEnabled, startWhatsAppChannel, @@ -279,7 +251,6 @@ import { import { resolvePromptTemplate } from "../agent/agent-prompts.mjs"; import { resolveCodexProfileRuntime } from "../shell/codex-model-profiles.mjs"; import { sanitizeMonitorTailForPrompt as sanitizeMonitorTailForPromptShared } from "../monitor-tail-sanitizer.mjs"; -import { webhookEvents as _githubWebhookEvents } from "../github/github-oauth-portal.mjs"; const __dirname = resolve(fileURLToPath(new URL(".", import.meta.url))); // ── Anomaly signal file path (shared with orchestrator) ────────────────────── @@ -1011,11 +982,7 @@ async function ensureWorkflowAutomationEngine() { "template-task-lifecycle", "template-task-finalization-guard", "template-agent-session-monitor", - "template-bosun-pr-watchdog", - "template-bosun-pr-progressor", 
"template-github-kanban-sync", - "template-recover-blocked-task", - "template-recover-blocked-worktrees", ], }); if (Number(reconcile?.autoUpdated || 0) > 0) { @@ -1087,6 +1054,12 @@ async function ensureWorkflowAutomationEngine() { } } + // Resume runs paused by a previous monitor shutdown after services are wired. + if (typeof engine.resumeInterruptedRuns === "function") { + engine.resumeInterruptedRuns().catch((err) => { + console.warn(`[workflows] Failed to resume interrupted runs: ${err?.message || err}`); + }); + } workflowAutomationInitDone = true; return engine; } catch (err) { @@ -1108,13 +1081,13 @@ async function ensureWorkflowAutomationEngine() { async function dispatchWorkflowEvent(eventType, eventData = {}, opts = {}) { try { - if (!workflowAutomationEnabled && !opts?.engine) return false; + if (!workflowAutomationEnabled) return false; const dedupKey = String(opts?.dedupKey || "").trim(); if (dedupKey && !allowWorkflowEvent(dedupKey)) { return false; } - const engine = opts?.engine || await ensureWorkflowAutomationEngine(); + const engine = await ensureWorkflowAutomationEngine(); if (!engine?.evaluateTriggers || !engine?.execute) return false; const payload = buildWorkflowEventPayload(eventType, eventData); @@ -1131,8 +1104,7 @@ async function dispatchWorkflowEvent(eventType, eventData = {}, opts = {}) { if (!Array.isArray(triggered) || triggered.length === 0) { return false; } - const awaitRuns = opts?.awaitRuns === true; - const runPromises = []; + for (const match of triggered) { const workflowId = String(match?.workflowId || "").trim(); if (!workflowId) continue; @@ -1140,7 +1112,7 @@ async function dispatchWorkflowEvent(eventType, eventData = {}, opts = {}) { ...payload, _triggeredBy: match?.triggeredBy || null, }; - const runPromise = engine + void engine .execute(workflowId, runPayload) .then((ctx) => { const runId = ctx?.id || "unknown"; @@ -1151,34 +1123,17 @@ async function dispatchWorkflowEvent(eventType, eventData = {}, opts = {}) { 
console.log( `[workflows] auto-run ${runStatus} workflow=${workflowId} runId=${runId} event=${eventType}`, ); - return { workflowId, runId, runStatus, ctx }; }) .catch((err) => { console.warn( `[workflows] auto-run failed workflow=${workflowId} event=${eventType}: ${err?.message || err}`, ); - if (awaitRuns) { - throw err; - } - return null; }); - if (awaitRuns) { - runPromises.push(runPromise); - } else { - void runPromise; - } } console.log( `[workflows] event "${eventType}" triggered ${triggered.length} workflow run(s)`, ); - if (awaitRuns) { - return { - triggered: true, - triggeredCount: triggered.length, - runs: (await Promise.all(runPromises)).filter(Boolean), - }; - } return true; } catch (err) { console.warn(`[workflows] dispatchWorkflowEvent error for ${eventType}: ${err?.message || err}`); @@ -1190,67 +1145,6 @@ function queueWorkflowEvent(eventType, eventData = {}, opts = {}) { dispatchWorkflowEvent(eventType, eventData, opts).catch(() => {}); } -// ── GitHub webhook → workflow-engine bridge ────────────────────────────────── -// Forwards GitHub App webhook events from the OAuth portal's EventEmitter into -// the workflow trigger engine so that trigger.event nodes with -// eventType: "github:*" fire in real-time instead of requiring polling. 
- -const _GITHUB_BRIDGE_EVENTS = [ - "github:pull_request", - "github:pull_request_review", - "github:pull_request_review_comment", - "github:check_run", - "github:check_suite", - "github:push", - "github:issue_comment", - "github:status", - "github:workflow_run", - "github:workflow_job", - "github:installation", - "github:installation_repositories", -]; - -let _githubWebhookBridgeSetup = false; - -function setupGitHubWebhookBridge() { - if (_githubWebhookBridgeSetup) return; - _githubWebhookBridgeSetup = true; - try { - for (const eventType of _GITHUB_BRIDGE_EVENTS) { - _githubWebhookEvents.on(eventType, ({ action, payload } = {}) => { - const pr = payload?.pull_request; - const cr = payload?.check_run; - queueWorkflowEvent(eventType, { - eventType, - action: action ?? null, - repo: payload?.repository?.full_name ?? null, - // PR fields (available in pull_request, review, and review_comment events) - prNumber: pr?.number ?? payload?.number ?? cr?.pull_requests?.[0]?.number ?? null, - prTitle: pr?.title ?? null, - prBranch: pr?.head?.ref ?? cr?.head_branch ?? null, - prBaseBranch: pr?.base?.ref ?? null, - prAuthor: pr?.user?.login ?? null, - prUrl: pr?.html_url ?? null, - // Check run fields - checkName: cr?.name ?? null, - checkConclusion: cr?.conclusion ?? null, - checkStatus: cr?.status ?? null, - // Review fields - reviewState: payload?.review?.state ?? null, - // Push / ref fields - ref: payload?.ref ?? null, - commitSha: payload?.after ?? payload?.head_commit?.id ?? null, - // Full payload available for complex filter expressions - payload, - }); - }); - } - console.log("[workflows] GitHub webhook bridge active — forwarding " + _GITHUB_BRIDGE_EVENTS.length + " event type(s)"); - } catch (err) { - console.warn(`[workflows] GitHub webhook bridge setup failed: ${err?.message || err}`); - } -} - function normalizePromptBody(value) { return typeof value === "string" ? value.trim() : ""; } @@ -2213,10 +2107,6 @@ workflowAutomationEnabled = parseEnvBoolean( ? 
dedupMs : 15_000; } -// Wire real-time GitHub webhook events into the workflow trigger engine. -// queueWorkflowEvent is a no-op when automation is disabled, so this is safe -// to call unconditionally at module init time. -if (!process.env.VITEST) setupGitHubWebhookBridge(); // Initialize runtime accumulator for persistent stats across restarts const runtimeStats = initRuntimeAccumulator(); @@ -3254,10 +3144,6 @@ const SELF_RESTART_FORCE_ACTIVE_SLOT_MIN_AGE_MS = Math.max( String(SELF_RESTART_MAX_DEFER_MS), ) || SELF_RESTART_MAX_DEFER_MS, ); -const RUNTIME_RESTART_REQUEST_POLL_MS = Math.max( - 1_000, - Number(process.env.BOSUN_RESTART_REQUEST_POLL_MS || "2000") || 2000, -); let selfWatcher = null; let selfWatcherLib = null; let selfWatcherExtra = []; // watchers for sibling source dirs (task/, workspace/, etc.) @@ -3332,15 +3218,10 @@ function buildCodexSdkOptionsForMonitor() { } // ── Self-restart marker: detect if this process was spawned by a code-change restart -const runtimeRestartRequestPath = resolve( - config.cacheDir || resolve(config.repoRoot, ".cache"), - "bosun-restart-request.json", -); const selfRestartMarkerPath = resolve( config.cacheDir || resolve(config.repoRoot, ".cache"), "ve-self-restart.marker", ); -let lastHandledRuntimeRestartRequestId = ""; let isSelfRestart = false; try { if (existsSync(selfRestartMarkerPath)) { @@ -3380,13 +3261,9 @@ function getTelegramBotStartOptions() { const restartReason = isSelfRestart ? 
"self-restart" : monitorRestartReason; - const allowDaemonPortalAutoOpen = isTruthyFlag( - process.env.BOSUN_UI_AUTO_OPEN_ON_DAEMON, - ); return { restartReason, - suppressPortalAutoOpen: - restartReason.length > 0 || !allowDaemonPortalAutoOpen, + suppressPortalAutoOpen: restartReason.length > 0, }; } @@ -3590,83 +3467,6 @@ async function ensurePreflightReady(reason) { return true; } -function maybeHandleQueuedRuntimeRestartRequest(trigger = "interval") { - if (shuttingDown) return false; - if (!existsSync(runtimeRestartRequestPath)) return false; - - let payload = null; - try { - payload = JSON.parse(readFileSync(runtimeRestartRequestPath, "utf8")); - } catch (err) { - console.warn( - `[monitor] invalid runtime restart request at ${runtimeRestartRequestPath}: ${err?.message || err}`, - ); - try { - unlinkSync(runtimeRestartRequestPath); - } catch { - /* best effort */ - } - return false; - } - - const requestId = String( - payload?.id || - `${payload?.requestedAt || ""}:${payload?.requesterPid || ""}:${payload?.reason || ""}:${payload?.targetPid || ""}`, - ).trim(); - const requestType = String(payload?.type || "code-reload").trim().toLowerCase(); - const targetPid = Number(payload?.targetPid || 0); - - if (requestId && requestId === lastHandledRuntimeRestartRequestId) { - try { - unlinkSync(runtimeRestartRequestPath); - } catch { - /* best effort */ - } - return false; - } - - if (requestType !== "code-reload") { - console.warn( - `[monitor] ignoring runtime restart request with unsupported type "${requestType}"`, - ); - try { - unlinkSync(runtimeRestartRequestPath); - } catch { - /* best effort */ - } - return false; - } - - if (targetPid > 0 && targetPid !== process.pid) { - console.warn( - `[monitor] ignoring runtime restart request for pid ${targetPid} (current pid ${process.pid})`, - ); - try { - unlinkSync(runtimeRestartRequestPath); - } catch { - /* best effort */ - } - return false; - } - - try { - unlinkSync(runtimeRestartRequestPath); - } catch { - /* 
best effort */ - } - lastHandledRuntimeRestartRequestId = requestId || `restart-${Date.now()}`; - - const reason = String(payload?.reason || "external-restart-request").trim() || - "external-restart-request"; - const requesterPid = Number(payload?.requesterPid || 0); - const requesterLabel = requesterPid > 0 ? ` from pid ${requesterPid}` : ""; - console.warn( - `[monitor] queued runtime reload requested (${reason})${requesterLabel} via ${trigger}; preserving current launch provenance.`, - ); - restartSelf(`queued-runtime-restart:${reason}`); - return true; -} - function restartSelf(reason) { if (shuttingDown) return; const protection = getRuntimeRestartProtection(); @@ -3710,7 +3510,6 @@ function restartSelf(reason) { ), ); } - stopHeartbeatMonitor(); stopAutoUpdateLoop(); stopAgentAlertTailer(); stopAgentWorkAnalyzer(); @@ -4114,27 +3913,7 @@ function safeSetInterval(reason, fn, ms) { `[monitor] timer delay clamped for interval:${reason} (${normalized}ms -> ${clamped}ms)`, ); } - let inFlight = false; - return setInterval(() => { - if (inFlight) return; - inFlight = true; - runGuarded(`interval:${reason}`, () => { - let result; - try { - result = fn(); - } catch (err) { - inFlight = false; - throw err; - } - if (result && typeof result.then === "function") { - return Promise.resolve(result).finally(() => { - inFlight = false; - }); - } - inFlight = false; - return result; - }); - }, clamped); + return setInterval(() => runGuarded(`interval:${reason}`, fn), clamped); } function safeSetTimeout(reason, fn, ms) { @@ -6122,21 +5901,9 @@ async function checkMergedPRsAndUpdateTasks() { const taskStatus = String(task?.status || "").trim().toLowerCase(); const allowsMergedRecovery = taskStatus === "todo" || taskStatus === "inprogress"; - const reviewStatus = String(task?.reviewStatus || "").trim().toLowerCase(); - const nowMs = Date.now(); - const updatedAt = Date.parse(task?.updatedAt || task?.updated_at || ""); - const ageMs = Number.isFinite(updatedAt) ? 
nowMs - updatedAt : Infinity; - const reviewVerdictCurrent = hasCurrentReviewVerdict(task); - const shouldRedispatchRejectedReview = - taskStatus === "inreview" && - reviewStatus === "changes_requested" && - reviewVerdictCurrent && - ageMs > 2 * 60 * 1000 && - !hasActiveSession(taskId) && - !isReviewRedispatchCoolingDown(taskId, nowMs); const approved = - reviewStatus === "approved" || + String(task?.reviewStatus || "").trim().toLowerCase() === "approved" || isTaskReviewApprovedForFlow(taskId); // inreview tasks must always be checked — their PR may have merged even if // the review-approval flag was never set (e.g. FLOW_REQUIRE_REVIEW=false or @@ -6178,19 +5945,9 @@ async function checkMergedPRsAndUpdateTasks() { // bouncing them back to todo. Allow a grace period to avoid racing a // freshly created PR that is not discoverable yet. if (allowsInreviewMergeCheck) { - if (shouldRedispatchRejectedReview) { - console.warn( - `[monitor] review reconcile: inreview task ${taskId} still has current changes_requested findings — re-dispatching remediation`, - ); - redispatchInReviewTask(task, "review-reconcile-changes-requested", { - workflowEvent: "task.review_fix_requested", - }); - summary.redispatchedReviewFix = (summary.redispatchedReviewFix || 0) + 1; - } else if ( - ageMs > 2 * 60 * 1000 && - !reviewVerdictCurrent && - !isReviewRedispatchCoolingDown(taskId, nowMs) - ) { + const updatedAt = Date.parse(task?.updatedAt || task?.updated_at || ""); + const ageMs = Number.isFinite(updatedAt) ? 
Date.now() - updatedAt : Infinity; + if (ageMs > 2 * 60 * 1000) { console.warn( `[monitor] review reconcile: inreview task ${taskId} has no discoverable PR — re-dispatching inreview repair`, ); @@ -6209,20 +5966,7 @@ async function checkMergedPRsAndUpdateTasks() { const prState = String(prInfo?.state || "").trim().toUpperCase(); const mergedAt = String(prInfo?.mergedAt || prInfo?.merged_at || "").trim(); const isMerged = prState === "MERGED" || Boolean(mergedAt); - if (!isMerged) { - if (shouldRedispatchRejectedReview) { - console.warn( - `[monitor] review reconcile: inreview task ${taskId} still has current changes_requested findings — re-dispatching remediation`, - ); - redispatchInReviewTask(task, "review-reconcile-changes-requested", { - prNumber, - prUrl, - workflowEvent: "task.review_fix_requested", - }); - summary.redispatchedReviewFix = (summary.redispatchedReviewFix || 0) + 1; - } - continue; - } + if (!isMerged) continue; const recoverySuffix = allowsMergedRecovery ? ` (status=${taskStatus || "unknown"})` @@ -6423,26 +6167,30 @@ function parseGhJsonResult(raw, fallback = []) { } } -async function readEpicPrInfo(headBranch, baseBranch) { +function readEpicPrInfo(headBranch, baseBranch) { const slug = getRepoSlugForEpic(); if (!slug || !ghAvailable()) return null; const { headInfo, baseInfo } = summarizeEpicBranch(headBranch, baseBranch); try { const listCmd = `gh pr list --repo ${slug} --head "${headInfo.name}" --base "${baseInfo.name}" --state all --json number,state,url,mergedAt`; - const listResult = (await execAsync(listCmd, { + const listResult = execSync(listCmd, { cwd: repoRoot, + encoding: "utf8", timeout: 20_000, - })).trim(); + stdio: ["pipe", "pipe", "ignore"], + }).trim(); const entries = parseGhJsonResult(listResult, []); if (!entries.length) return null; const pr = entries[0]; let detail = {}; try { const viewCmd = `gh pr view ${pr.number} --repo ${slug} --json 
number,state,url,mergeable,mergeable_state,mergeStateStatus,baseRefName,headRefName`; - const viewResult = (await execAsync(viewCmd, { + const viewResult = execSync(viewCmd, { cwd: repoRoot, + encoding: "utf8", timeout: 20_000, - })).trim(); + stdio: ["pipe", "pipe", "ignore"], + }).trim(); detail = parseGhJsonResult(viewResult, {}); } catch { /* best-effort */ @@ -6484,10 +6232,12 @@ async function readRequiredChecks(prNumber) { if (!slug || !ghAvailable() || !prNumber) return []; try { const checksCmd = `gh pr checks ${prNumber} --repo ${slug} --json name,state --required`; - const checksResult = (await execAsync(checksCmd, { + const checksResult = execSync(checksCmd, { cwd: repoRoot, + encoding: "utf8", timeout: 20_000, - })).trim(); + stdio: ["pipe", "pipe", "ignore"], + }).trim(); return parseGhJsonResult(checksResult, []); } catch { return []; @@ -6879,7 +6629,7 @@ async function checkEpicBranches(reason = "interval") { continue; } - let prInfo = await readEpicPrInfo(epicBranch, DEFAULT_TARGET_BRANCH); + let prInfo = readEpicPrInfo(epicBranch, DEFAULT_TARGET_BRANCH); if (!prInfo || prInfo.state === "CLOSED") { const created = await createEpicMergePr( epicBranch, @@ -7194,15 +6944,6 @@ function getReviewGateSnapshot(taskId) { } } -function hasCurrentReviewVerdict(task) { - const reviewStatus = String(task?.reviewStatus || "").trim().toLowerCase(); - if (!["approved", "changes_requested"].includes(reviewStatus)) { - return false; - } - const reviewedAtMs = Date.parse(String(task?.reviewedAt || "")); - return Number.isFinite(reviewedAtMs); -} - function isTaskReviewApprovedForFlow(taskId) { const snapshot = getReviewGateSnapshot(taskId); return snapshot?.approved === true; @@ -7268,17 +7009,6 @@ async function queueFlowReview(taskId, ctx, reason = "") { function redispatchInReviewTask(task, reason, extra = {}) { const taskId = String(task?.id || "").trim(); if (!taskId) return false; - const normalizedReason = String(reason || "inreview_redispatch").trim() || 
"inreview_redispatch"; - const eventType = String(extra.workflowEvent || "task.assigned").trim() || "task.assigned"; - const now = Date.now(); - const existing = reviewRedispatchCooldownByTask.get(taskId); - if (existing && now - existing.at < REVIEW_REDISPATCH_COOLDOWN_MS) { - return false; - } - reviewRedispatchCooldownByTask.set(taskId, { - at: now, - reason: normalizedReason, - }); const taskTitle = String(task?.title || taskId).trim() || taskId; const branch = String(task?.branchName || task?.branch || extra.branch || "").trim() || null; const worktreePath = String(task?.worktreePath || task?.meta?.worktreePath || extra.worktreePath || "").trim() || null; @@ -7288,11 +7018,8 @@ function redispatchInReviewTask(task, reason, extra = {}) { parsePositivePrNumber(task?.pr_number) || null; const prUrl = String(extra.prUrl || task?.prUrl || task?.pr_url || "").trim() || null; - const reviewIssues = Array.isArray(extra.reviewIssues) - ? extra.reviewIssues - : (Array.isArray(task?.reviewIssues) ? 
task.reviewIssues : []); queueWorkflowEvent( - eventType, + "task.assigned", { taskId, taskTitle, @@ -7301,28 +7028,14 @@ function redispatchInReviewTask(task, reason, extra = {}) { worktreePath, prNumber, prUrl, - reviewStatus: String(task?.reviewStatus || "").trim() || null, - reviewIssues, - reviewIssueCount: reviewIssues.length, - reviewRedispatchReason: normalizedReason, + reviewRedispatchReason: String(reason || "inreview_redispatch").trim() || "inreview_redispatch", ...extra, }, - { dedupKey: `workflow-event:${eventType}:${taskId}:inreview:${normalizedReason}` }, + { dedupKey: `workflow-event:task.assigned:${taskId}:inreview:${String(reason || "inreview_redispatch").trim() || "inreview_redispatch"}` }, ); return true; } -function isReviewRedispatchCoolingDown(taskId, now = Date.now()) { - const normalizedTaskId = String(taskId || "").trim(); - if (!normalizedTaskId) return false; - const existing = reviewRedispatchCooldownByTask.get(normalizedTaskId); - return Boolean( - existing && - Number.isFinite(existing.at) && - now - existing.at < REVIEW_REDISPATCH_COOLDOWN_MS - ); -} - function parsePositivePrNumber(value) { const parsed = Number(String(value || "").trim()); return Number.isFinite(parsed) && parsed > 0 ? parsed : null; @@ -10750,7 +10463,7 @@ async function loadCodexSdk() { async function tryImportCodex() { try { - const mod = await import("@openai/" + "codex-sdk"); + const mod = await import("@openai/codex-sdk"); return mod.Codex; } catch (err) { return null; @@ -10774,8 +10487,7 @@ function installDependencies() { return res.status === 0; } - const npmCommand = process.platform === "win32" ? 
"npm.cmd" : "npm"; - const npm = spawnSync(npmCommand, ["install"], { cwd, stdio: "inherit" }); + const npm = spawnSync("npm", ["install"], { cwd, stdio: "inherit" }); return npm.status === 0; } @@ -12389,42 +12101,6 @@ function stopMonitorMonitorSupervisor({ preserveRunning = false } = {}) { } } -function isHeartbeatMonitorEnabled() { - if (isMonitorTestRuntime) return false; - const raw = String(process.env.BOSUN_HEARTBEAT_MONITOR_ENABLED || "true") - .trim() - .toLowerCase(); - return !["0", "false", "no", "off"].includes(raw); -} - -function restartHeartbeatMonitor() { - heartbeatRuntimeState.current?.stop?.(); - heartbeatRuntimeState.current = null; - - if (!isHeartbeatMonitorEnabled()) return; - const configDir = String(config?.configDir || "").trim(); - if (!configDir) return; - - heartbeatRuntimeState.current = createHeartbeatMonitor({ - configDir, - logDir, - intervalMs: Number(process.env.BOSUN_HEARTBEAT_INTERVAL_MS || 30_000), - timeoutMs: Number(process.env.BOSUN_HEARTBEAT_TIMEOUT_MS || 5_000), - successLogIntervalMs: Number( - process.env.BOSUN_HEARTBEAT_SUCCESS_LOG_INTERVAL_MS || 10 * 60_000, - ), - eventLoopWarnMs: Number(process.env.BOSUN_HEARTBEAT_LAG_WARN_MS || 1000), - logger: console, - }); - heartbeatRuntimeState.current.start(); - console.log("[monitor] heartbeat monitor started"); -} - -function stopHeartbeatMonitor() { - heartbeatRuntimeState.current?.stop?.(); - heartbeatRuntimeState.current = null; -} - /** * Called when a Live Digest window is sealed. * This provides fresh high-priority context and triggers an immediate run. 
@@ -12634,7 +12310,6 @@ async function startProcess() { const child = spawn(orchestratorCmd, orchestratorArgs, { stdio: ["ignore", "pipe", "pipe"], - windowsHide: true, }); currentChild = child; @@ -13593,7 +13268,6 @@ function applyConfig(nextConfig, options = {}) { } else { stopMonitorMonitorSupervisor(); } - restartHeartbeatMonitor(); restartGitHubReconciler(); const nextArgs = scriptArgs?.join(" ") || ""; @@ -13631,7 +13305,6 @@ const stopTuiConfigReloadListener = onConfigReload((payload = {}) => { process.on("SIGINT", async () => { shuttingDown = true; stopWorkspaceSyncTimers(); - stopHeartbeatMonitor(); stopAutoUpdateLoop(); stopAgentAlertTailer(); stopAgentWorkAnalyzer(); @@ -13680,7 +13353,6 @@ process.on("exit", () => { try { stopTuiConfigReloadListener?.(); } catch { /* best effort */ } shuttingDown = true; stopWorkspaceSyncTimers(); - stopHeartbeatMonitor(); stopAgentAlertTailer(); stopAgentWorkAnalyzer(); runDetachedDuringShutdown("workspace-monitor-shutdown:exit", () => @@ -13694,7 +13366,6 @@ process.on("exit", () => { process.on("SIGTERM", async () => { shuttingDown = true; stopWorkspaceSyncTimers(); - stopHeartbeatMonitor(); stopAutoUpdateLoop(); stopAgentAlertTailer(); stopAgentWorkAnalyzer(); @@ -13839,22 +13510,6 @@ process.on("exit", (code) => { appendMonitorCrashBreadcrumb(line); }); -const workflowStartupRecoveryGraceMs = Math.max( - 0, - Number(configWorkflowRecovery?.startupGraceMs || 0), -); -const workflowStartupRecoveryStepDelayMs = Math.max( - 0, - Number(configWorkflowRecovery?.startupStepDelayMs || 0), -); - -function scheduleStartupWorkflowRecovery(name, handler, step = 0) { - const delayMs = - workflowStartupRecoveryGraceMs + - Math.max(0, step) * workflowStartupRecoveryStepDelayMs; - safeSetTimeout(name, handler, delayMs); -} - if (!isMonitorTestRuntime) { const DUPLICATE_START_EXIT_STATE_FILE = "monitor-duplicate-start-exit-state.json"; @@ -13996,10 +13651,6 @@ console.log("[monitor] legacy maintenance sweep removed — use workflow 
schedul safeSetInterval("flush-error-queue", () => flushErrorQueue(), 60 * 1000); -safeSetInterval("queued-runtime-restart-request", () => { - maybeHandleQueuedRuntimeRestartRequest("interval"); -}, RUNTIME_RESTART_REQUEST_POLL_MS); - // Legacy periodic maintenance sweep removed (workflow-only control). // ── Workflow schedule trigger polling ─────────────────────────────────────── @@ -14021,7 +13672,6 @@ pollWorkflowSchedulesOnce = async function pollWorkflowSchedulesOnce( return; } const includeTaskPoll = opts?.includeTaskPoll !== false; - const includeScheduled = opts?.includeScheduled !== false; const triggered = engine.evaluateScheduleTriggers({ configDir: repoRoot }); if (!Array.isArray(triggered) || triggered.length === 0) return; @@ -14029,18 +13679,14 @@ pollWorkflowSchedulesOnce = async function pollWorkflowSchedulesOnce( for (const match of triggered) { const workflowId = String(match?.workflowId || "").trim(); if (!workflowId) continue; - const workflow = typeof engine.get === "function" ? engine.get(workflowId) : null; - const triggerNode = Array.isArray(workflow?.nodes) - ? workflow.nodes.find((node) => node?.id === match?.triggeredBy) - : null; - const isTaskPollTrigger = - triggerNode?.type === "trigger.task_available" || - triggerNode?.type === "trigger.task_low"; - if (!includeTaskPoll && isTaskPollTrigger) { - continue; - } - if (!includeScheduled && !isTaskPollTrigger) { - continue; + if (!includeTaskPoll) { + const workflow = typeof engine.get === "function" ? engine.get(workflowId) : null; + const triggerNode = Array.isArray(workflow?.nodes) + ? 
workflow.nodes.find((node) => node?.id === match?.triggeredBy) + : null; + if (triggerNode?.type === "trigger.task_available" || triggerNode?.type === "trigger.task_low") { + continue; + } } void engine .execute(workflowId, { @@ -14085,22 +13731,6 @@ safeSetInterval("workflow-schedule-check", async () => { await pollWorkflowSchedulesOnce(); }, scheduleCheckIntervalMs); -// ── Periodic workflow run file pruning: once per day ───────────────────── -// Deletes run detail files beyond MAX_PERSISTED_RUNS to keep the workflow-runs -// directory bounded and prevent O(n) dir-scan slowdown on history API calls. -safeSetInterval("workflow-run-file-prune", async () => { - try { - const engine = await ensureWorkflowAutomationEngine(); - if (typeof engine?.pruneOldRunFiles !== "function") return; - const result = engine.pruneOldRunFiles(); - if (result.deleted > 0) { - console.log(`[workflows] pruned ${result.deleted} old run file(s), kept ${result.kept}`); - } - } catch (err) { - console.warn(`[workflows] run-file prune error: ${err?.message || err}`); - } -}, 24 * 60 * 60 * 1000); - safeSetInterval("workflow-review-merge-reconcile", async () => { const result = await checkMergedPRsAndUpdateTasks(); if ((result?.movedDone || 0) > 0) { @@ -14129,16 +13759,16 @@ async function syncDivergedWorktrees() { try { // Fetch remote to update tracking refs - await execAsync("git fetch origin --no-tags", { - cwd: wtPath, timeout: 30_000, + execSync("git fetch origin --no-tags", { + cwd: wtPath, timeout: 30_000, stdio: ["ignore", "pipe", "pipe"], }); // Check ahead/behind vs remote tracking ref const remoteRef = `origin/${branch}`; let remoteExists = false; try { - await execAsync(`git rev-parse --verify ${remoteRef}`, { - cwd: wtPath, timeout: 5_000, + execSync(`git rev-parse --verify ${remoteRef}`, { + cwd: wtPath, timeout: 5_000, stdio: ["ignore", "pipe", "pipe"], }); remoteExists = true; } catch { /* branch not yet pushed — nothing to sync */ } @@ -14146,13 +13776,13 @@ async function 
syncDivergedWorktrees() { if (!remoteExists) continue; const ahead = parseInt( - (await execAsync(`git rev-list --count ${remoteRef}..HEAD`, { - cwd: wtPath, timeout: 10_000, - })).trim(), 10); + execSync(`git rev-list --count ${remoteRef}..HEAD`, { + cwd: wtPath, encoding: "utf8", timeout: 10_000, stdio: ["ignore", "pipe", "pipe"], + }).trim(), 10); const behind = parseInt( - (await execAsync(`git rev-list --count HEAD..${remoteRef}`, { - cwd: wtPath, timeout: 10_000, - })).trim(), 10); + execSync(`git rev-list --count HEAD..${remoteRef}`, { + cwd: wtPath, encoding: "utf8", timeout: 10_000, stdio: ["ignore", "pipe", "pipe"], + }).trim(), 10); // Only act on diverged worktrees (behind > 0 AND ahead > 0) if (ahead === 0 || behind === 0) continue; @@ -14164,12 +13794,12 @@ async function syncDivergedWorktrees() { // Rebase local onto remote tracking ref to incorporate remote commits let rebased = false; try { - await execAsync(`git rebase ${remoteRef}`, { - cwd: wtPath, timeout: 60_000, + execSync(`git rebase ${remoteRef}`, { + cwd: wtPath, encoding: "utf8", timeout: 60_000, stdio: ["ignore", "pipe", "pipe"], }); rebased = true; } catch (rebaseErr) { - try { await execAsync("git rebase --abort", { cwd: wtPath, timeout: 10_000 }); } catch { /* ok */ } + try { execSync("git rebase --abort", { cwd: wtPath, timeout: 10_000, stdio: ["ignore", "pipe", "pipe"] }); } catch { /* ok */ } console.warn( `[monitor:worktree-sync] ${branch} rebase conflict — skipping push: ${rebaseErr.message?.slice(0, 200)}`, ); @@ -14179,8 +13809,8 @@ async function syncDivergedWorktrees() { // Safety: refuse to push if HEAD now equals origin/main (would wipe PR changes) try { - const headSha = (await execAsync("git rev-parse HEAD", { cwd: wtPath, timeout: 5_000 })).trim(); - const mainSha = (await execAsync("git rev-parse origin/main", { cwd: wtPath, timeout: 5_000 })).trim(); + const headSha = execSync("git rev-parse HEAD", { cwd: wtPath, encoding: "utf8", timeout: 5_000, stdio: ["pipe", 
"pipe", "pipe"] }).trim(); + const mainSha = execSync("git rev-parse origin/main", { cwd: wtPath, encoding: "utf8", timeout: 5_000, stdio: ["pipe", "pipe", "pipe"] }).trim(); if (headSha === mainSha) { console.warn(`[monitor:worktree-sync] ${branch} HEAD matches origin/main after rebase — aborting push to prevent PR wipe`); failed++; @@ -14190,8 +13820,8 @@ async function syncDivergedWorktrees() { // Push with --force-with-lease (safe: we just fetched fresh remote refs) try { - await execAsync(`git push --force-with-lease --set-upstream origin HEAD`, { - cwd: wtPath, timeout: 30_000, + execSync(`git push --force-with-lease --set-upstream origin HEAD`, { + cwd: wtPath, encoding: "utf8", timeout: 30_000, stdio: ["ignore", "pipe", "pipe"], }); console.log(`[monitor:worktree-sync] ${branch} sync-pushed successfully`); synced++; @@ -14236,21 +13866,11 @@ if (logMaxSizeMb > 0) { } } -const STARTUP_EPIC_CHECK_DELAY_MS = parseEnvInteger( - process.env.BOSUN_STARTUP_EPIC_CHECK_DELAY_MS, - 2 * 60 * 1000, - { min: 30 * 1000, max: 30 * 60 * 1000 }, -); -const STARTUP_DEPENDABOT_CHECK_DELAY_MS = parseEnvInteger( - process.env.BOSUN_STARTUP_DEPENDABOT_CHECK_DELAY_MS, - STARTUP_EPIC_CHECK_DELAY_MS + 30 * 1000, - { min: STARTUP_EPIC_CHECK_DELAY_MS, max: 30 * 60 * 1000 }, -); - -// Stagger the heaviest git/gh maintenance away from initial startup so the UI -// server can answer readiness probes before long-running repo checks begin. 
-safeSetTimeout("startup-epic-check", () => checkEpicBranches("startup"), STARTUP_EPIC_CHECK_DELAY_MS); -safeSetTimeout("startup-dependabot-auto-merge", () => checkAndMergeDependabotPRs(), STARTUP_DEPENDABOT_CHECK_DELAY_MS); +// Run once immediately after startup (delayed by 30s to let things settle) +safeSetTimeout("startup-health-checks", () => { + checkEpicBranches("startup"); + return checkAndMergeDependabotPRs(); +}, 30 * 1000); // ── Fleet Coordination ─────────────────────────────────────────────────────── if (fleetConfig?.enabled) { @@ -14447,8 +14067,6 @@ let reviewAgent = null; /** @type {Map} */ const reviewGateResults = new Map(); const pendingMergeStrategyByTask = new Map(); -const reviewRedispatchCooldownByTask = new Map(); -const REVIEW_REDISPATCH_COOLDOWN_MS = 5 * 60 * 1000; /** @type {null} Sync engine lifecycle now managed by workflow template */ let syncEngine = null; /** @type {import("./error-detector.mjs").ErrorDetector|null} */ @@ -14459,43 +14077,33 @@ let agentSupervisor = null; if (!isMonitorTestRuntime) { if (workflowAutomationEnabled) { await ensureWorkflowAutomationEngine().catch(() => {}); - scheduleStartupWorkflowRecovery( - "startup-stale-dispatch-unstick", + runWorkflowRecoveryWithPolicy( + "stale-dispatch-unstick", () => - void runWorkflowRecoveryWithPolicy( - "stale-dispatch-unstick", - () => - pollWorkflowSchedulesOnce("startup", { - includeTaskPoll: false, - requireEngine: true, - throwOnError: true, - }), - { - trigger: "startup", - operationType: "stale-dispatch-unstick", - includeTaskPoll: false, - }, - ), - 0, + pollWorkflowSchedulesOnce("startup", { + includeTaskPoll: false, + requireEngine: true, + throwOnError: true, + }), + { + trigger: "startup", + operationType: "stale-dispatch-unstick", + includeTaskPoll: false, + }, ); - scheduleStartupWorkflowRecovery( - "startup-workflow-history-unstick", - () => - void runWorkflowRecoveryWithPolicy( - "workflow-history-unstick", - async () => { - const engine = await 
ensureWorkflowAutomationEngine(); - if (!engine?.resumeInterruptedRuns) { - throw new Error("workflow engine resumeInterruptedRuns unavailable"); - } - await engine.resumeInterruptedRuns(); - }, - { - trigger: "startup", - operationType: "workflow-history-unstick", - }, - ), - 1, + runWorkflowRecoveryWithPolicy( + "workflow-history-unstick", + async () => { + const engine = await ensureWorkflowAutomationEngine(); + if (!engine?.resumeInterruptedRuns) { + throw new Error("workflow engine resumeInterruptedRuns unavailable"); + } + await engine.resumeInterruptedRuns(); + }, + { + trigger: "startup", + operationType: "workflow-history-unstick", + }, ); } else { console.log( @@ -14752,24 +14360,18 @@ if (isExecutorDisabled()) { internalTaskExecutor = getTaskExecutor(execOpts); internalTaskExecutor.start(); if (workflowOwnsTaskExecutorLifecycle) { - scheduleStartupWorkflowRecovery( - "startup-stale-dispatch-task-poll-unstick", + runWorkflowRecoveryWithPolicy( + "stale-dispatch-task-poll-unstick", () => - void runWorkflowRecoveryWithPolicy( - "stale-dispatch-task-poll-unstick", - () => - pollWorkflowSchedulesOnce("startup", { - includeScheduled: false, - requireEngine: true, - throwOnError: true, - }), - { - trigger: "startup", - operationType: "stale-dispatch-task-poll-unstick", - includeTaskPoll: true, - }, - ), - 2, + pollWorkflowSchedulesOnce("startup", { + requireEngine: true, + throwOnError: true, + }), + { + trigger: "startup", + operationType: "stale-dispatch-task-poll-unstick", + includeTaskPoll: true, + }, ); } @@ -14910,24 +14512,13 @@ if (isExecutorDisabled()) { errorDetector: errorDetector || undefined, sendTelegram: telegramToken && telegramChatId - ? (msg, options) => void sendTelegramMessage(msg, options) + ? 
(msg) => void sendTelegramMessage(msg) : null, getTask: (taskId) => getInternalTask(taskId), setTaskStatus: (taskId, status, source) => setInternalTaskStatus(taskId, status, source), updateTask: (taskId, updates) => updateInternalTask(taskId, updates), - retryReviewThreshold: Number(internalExecutorConfig?.retryReviewThreshold || 0), - retryDelayMs: Number(internalExecutorConfig?.retryDelayMs || 15_000), - onRetryThresholdExceeded: ({ taskId }) => { - const normalizedTaskId = String(taskId || "").trim(); - if (!normalizedTaskId) return; - const task = getInternalTask(normalizedTaskId); - if (!task) return; - redispatchInReviewTask(task, "retry-threshold-review-fix", { - workflowEvent: "task.review_fix_requested", - }); - }, // broadcastUiEvent is wired later when UI server starts via // injectUiDependencies → setBroadcastFn pattern }); @@ -14948,7 +14539,7 @@ if (isExecutorDisabled()) { eventBus: agentEventBus || undefined, sendTelegram: telegramToken && telegramChatId - ? (msg, options) => void sendTelegramMessage(msg, options) + ? (msg) => void sendTelegramMessage(msg) : null, getTask: (taskId) => getInternalTask(taskId), setTaskStatus: (taskId, status, source) => @@ -14979,12 +14570,7 @@ if (isExecutorDisabled()) { }, dispatchFixTask: (taskId, issues) => { const normalizedTaskId = String(taskId || "").trim(); - if (!normalizedTaskId) { - return { - dispatched: false, - reason: "missing_task_id", - }; - } + if (!normalizedTaskId) return; const task = getInternalTask(normalizedTaskId); const issueList = Array.isArray(issues) ? 
issues : []; const issueCount = issueList.length; @@ -14994,14 +14580,7 @@ if (isExecutorDisabled()) { console.warn( `[monitor] supervisor dispatch-fix skipped for ${normalizedTaskId}: status=${status}`, ); - return { - dispatched: false, - reason: "status_not_inreview", - status, - issueCount, - taskId: normalizedTaskId, - taskTitle: task?.title || normalizedTaskId, - }; + return; } if (hasActiveSession(normalizedTaskId)) { @@ -15029,13 +14608,7 @@ if (isExecutorDisabled()) { `[monitor] supervisor dispatch-fix steering active session for ${normalizedTaskId}`, ); steerActiveThread(normalizedTaskId, prompt); - return { - dispatched: true, - mode: "active_session", - issueCount, - taskId: normalizedTaskId, - taskTitle: task?.title || normalizedTaskId, - }; + return; } console.warn( @@ -15043,16 +14616,8 @@ if (isExecutorDisabled()) { ); redispatchInReviewTask(task || { id: normalizedTaskId, title: normalizedTaskId }, "review-fix-redispatch", { reviewIssueCount: issueCount, - reviewIssues: issueList, workflowEvent: "task.review_fix_requested", }); - return { - dispatched: true, - mode: "redispatch", - issueCount, - taskId: normalizedTaskId, - taskTitle: task?.title || normalizedTaskId, - }; }, }); agentSupervisor.start(); @@ -15082,7 +14647,7 @@ if (isExecutorDisabled()) { ), sendTelegram: telegramToken && telegramChatId - ? (msg, options) => void sendTelegramMessage(msg, options) + ? 
(msg) => void sendTelegramMessage(msg) : null, promptTemplate: agentPrompts?.reviewer, onReviewComplete: (taskId, result) => { @@ -15167,26 +14732,9 @@ if (isExecutorDisabled()) { if (Array.isArray(pending) && pending.length > 0) { let requeued = 0; let redispatchedMissingRefs = 0; - let skippedReviewed = 0; for (const task of pending) { const taskId = String(task?.id || "").trim(); if (!taskId) continue; - const reviewStatus = String(task?.reviewStatus || "").trim().toLowerCase(); - if (hasCurrentReviewVerdict(task)) { - if ( - reviewStatus === "changes_requested" && - !hasActiveSession(taskId) && - !isReviewRedispatchCoolingDown(taskId) - ) { - redispatchInReviewTask(task, "review-agent-rehydrate-review-fix", { - workflowEvent: "task.review_fix_requested", - }); - redispatchedMissingRefs += 1; - continue; - } - skippedReviewed += 1; - continue; - } const branchName = String(task?.branchName || "").trim(); let prUrl = String(task?.prUrl || "").trim(); let prNumber = String(task?.prNumber || "").trim(); @@ -15259,11 +14807,6 @@ if (isExecutorDisabled()) { `[monitor] review agent redispatched ${redispatchedMissingRefs} inreview task(s) with missing review references`, ); } - if (skippedReviewed > 0) { - console.log( - `[monitor] review agent skipped ${skippedReviewed} already-reviewed inreview task(s) during rehydrate`, - ); - } } } catch (err) { console.warn( @@ -15506,7 +15049,8 @@ export { // Container runner re-exports getContainerStatus, isContainerEnabled, - dispatchWorkflowEvent, // Workflow event bridge — for fleet/kanban modules to emit events queueWorkflowEvent, }; + + diff --git a/infra/preflight.mjs b/infra/preflight.mjs index 8551a5c31..498fd90fd 100644 --- a/infra/preflight.mjs +++ b/infra/preflight.mjs @@ -3,10 +3,7 @@ import { existsSync } from "node:fs"; import { resolve } from "node:path"; import os from "node:os"; import { resolvePwshRuntime } from "../shell/pwsh-runtime.mjs"; -import { - ensureGitHooksPath, - inspectWorktreeRuntimeSetup, -} from 
"../workspace/worktree-setup.mjs"; +import { inspectWorktreeRuntimeSetup } from "../workspace/worktree-setup.mjs"; const isWindows = process.platform === "win32"; const MIN_FREE_GB = Number(process.env.BOSUN_MIN_FREE_GB || "10"); @@ -95,35 +92,9 @@ function checkWorktreeClean(repoRoot) { function checkWorktreeRuntimeSetup(repoRoot) { if (!existsSync(resolve(repoRoot, ".githooks"))) { - return { - ok: true, - issues: [], - hooksPath: "", - missingFiles: [], - repairedHooksPath: false, - repairError: "", - }; + return { ok: true, issues: [], hooksPath: "", missingFiles: [] }; } - const initial = inspectWorktreeRuntimeSetup(repoRoot, repoRoot); - const needsHooksPathRepair = initial.issues.some((issue) => - /core\.hooksPath/i.test(String(issue || "")), - ); - - if (!needsHooksPathRepair) { - return { - ...initial, - repairedHooksPath: false, - repairError: "", - }; - } - - const repair = ensureGitHooksPath(repoRoot); - const final = inspectWorktreeRuntimeSetup(repoRoot, repoRoot); - return { - ...final, - repairedHooksPath: repair.changed === true, - repairError: repair.error || "", - }; + return inspectWorktreeRuntimeSetup(repoRoot, repoRoot); } /** @@ -219,42 +190,6 @@ function checkToolVersion(label, command, args, hint) { return { label, ok: true, version }; } -function checkHookShell(repoRoot) { - if (!isWindows || !existsSync(resolve(repoRoot, ".githooks"))) { - return { ok: true, issue: null, resolvedPath: null, allPaths: [] }; - } - - const res = runCommand("where", ["bash"]); - if (res.error || res.status !== 0) { - return { - ok: false, - issue: "missing_bash", - resolvedPath: null, - allPaths: [], - message: "bash is not on PATH; Git hook execution may fail on Windows.", - }; - } - - const allPaths = readOutput(res) - .split(/\r?\n/) - .map((entry) => entry.trim()) - .filter(Boolean); - const resolvedPath = allPaths[0] || null; - const normalizedPath = String(resolvedPath || "").replace(/\\/g, "/").toLowerCase(); - if 
(normalizedPath.includes("/windows/system32/bash.exe")) { - return { - ok: false, - issue: "wsl_bash_first", - resolvedPath, - allPaths, - message: - "PATH resolves bash to WSL first. Git hooks in Windows worktrees should use Git for Windows bash (for example C:/Program Files/Git/bin/bash.exe).", - }; - } - - return { ok: true, issue: null, resolvedPath, allPaths }; -} - function parseEnvBool(value, fallback = false) { if (value === undefined || value === null || value === "") return fallback; const raw = String(value).trim().toLowerCase(); @@ -279,8 +214,6 @@ function checkToolchain() { "git", "gh", "node", - "npm", - "rg", shellMode ? "shell" : "pwsh", ]); const pwshRuntime = resolvePwshRuntime({ preferBundled: true }); @@ -304,24 +237,12 @@ function checkToolchain() { ["--version"], "Install Node.js 18+ and ensure it is on PATH.", ), - checkToolVersion( - "npm", - "npm", - ["--version"], - "Install npm and ensure it is on PATH. On Windows, verify npm commands can be spawned from PowerShell.", - ), checkToolVersion( "pnpm", "pnpm", ["--version"], "Install pnpm (npm install -g pnpm) and ensure it is on PATH.", ), - checkToolVersion( - "rg", - "rg", - ["--version"], - "Install ripgrep (rg) and ensure it is on PATH.", - ), checkToolVersion( "go", "go", @@ -427,34 +348,17 @@ export function runPreflightChecks(options = {}) { } const runtimeSetup = checkWorktreeRuntimeSetup(repoRoot); - if (runtimeSetup.repairedHooksPath) { - warnings.push({ - title: "Git hooks path auto-repaired", - message: 'Reset git core.hooksPath to ".githooks" during preflight.', - }); - } if (!runtimeSetup.ok) { errors.push({ title: "Worktree runtime setup is incomplete", message: runtimeSetup.issues.join(os.EOL) + - (runtimeSetup.repairError - ? `${os.EOL}Repair attempt failed: ${runtimeSetup.repairError}` - : "") + (runtimeSetup.missingFiles.length > 0 ? 
`${os.EOL}Run Bosun setup or bootstrap the repo so worktrees include the required hook/config files.` : ""), }); } - const hookShell = checkHookShell(repoRoot); - if (!hookShell.ok) { - warnings.push({ - title: "Windows hook shell may be misconfigured", - message: hookShell.message, - }); - } - if (toolchain.ok) { ghAuth = checkGhAuth(); if (!ghAuth.ok) { @@ -482,10 +386,8 @@ export function runPreflightChecks(options = {}) { toolchain, gitConfig, worktree, - hookShell, ghAuth, disk, - runtimeSetup, minFreeBytes: MIN_FREE_BYTES, }, }; @@ -532,11 +434,6 @@ export function formatPreflightReport(result, options = {}) { ); } - const hookShell = result.details?.hookShell; - if (hookShell?.resolvedPath) { - lines.push(`Hook shell: ${hookShell.resolvedPath}`); - } - if (result.errors.length) { lines.push("Errors:"); for (const err of result.errors) { diff --git a/infra/runtime-accumulator.mjs b/infra/runtime-accumulator.mjs index b6d330631..051b063e2 100644 --- a/infra/runtime-accumulator.mjs +++ b/infra/runtime-accumulator.mjs @@ -123,10 +123,6 @@ function normalizeCompletedSession(session = {}) { const sessionKey = String( session.sessionKey || `${taskId || "task"}:${stableId}:${startedAt}:${endedAt}`, ).trim(); - const turnCount = Math.max(0, toFiniteNumber(session.turnCount, 0)); - const turns = Array.isArray(session.turns) - ? 
session.turns.map((turn) => ({ ...turn })) - : []; return { type: "completed_session", @@ -143,8 +139,6 @@ function normalizeCompletedSession(session = {}) { tokenCount, inputTokens, outputTokens, - turnCount, - turns, costUsd, recordedAt: String(session.recordedAt || new Date().toISOString()), }; @@ -363,7 +357,6 @@ export function getRuntimeStats() { runtimeMs: _state.runtimeMs, totalCostUsd: _state.totalCostUsd, sessionCount: _state.completedSessions.length, - completedSessions: _state.completedSessions.map((entry) => ({ ...entry })), startedAt: _state.startedAt, lastUpdated: _state.lastUpdated, }; @@ -504,11 +497,7 @@ export function getSessionAccumulatorLogPath() { } export function _resetRuntimeAccumulatorForTests(options = {}) { - // When no explicit cacheDir is given, prefer the test-sandbox dir set by - // bootstrapTestRuntime() so that bare reset calls (e.g. in finally blocks) - // never redirect writes back to the real workspace .cache folder. - const fallback = process.env.BOSUN_TEST_CACHE_DIR || DEFAULT_CACHE_DIR; - configureCachePaths(options.cacheDir || fallback); + configureCachePaths(options.cacheDir || DEFAULT_CACHE_DIR); _state = cloneDefaultState(); _initialized = false; _lastSaveTime = 0; diff --git a/infra/session-tracker.mjs b/infra/session-tracker.mjs index 82baf4a16..3be44362c 100644 --- a/infra/session-tracker.mjs +++ b/infra/session-tracker.mjs @@ -11,7 +11,7 @@ * @module session-tracker */ -import { existsSync, mkdirSync, readFileSync, readdirSync, statSync, writeFileSync, unlinkSync } from "node:fs"; +import { existsSync, mkdirSync, readFileSync, readdirSync, writeFileSync, unlinkSync } from "node:fs"; import { resolve, dirname, sep } from "node:path"; import { randomBytes } from "node:crypto"; import { fileURLToPath } from "node:url"; @@ -52,71 +52,7 @@ const MAX_MESSAGE_CHARS = 100_000; /** Maximum total sessions to keep in memory. 
*/ const MAX_SESSIONS = 100; -const TERMINAL_SESSION_STATUSES = new Set([ - "completed", - "failed", - "idle", - "archived", - "stalled", - "blocked_by_repo", - "blocked_by_env", - "no_output", - "implementation_done_commit_blocked", -]); - -const SESSION_PLACEHOLDER_OUTPUTS = new Set([ - "continued", - "model response continued", - "turn completed", - "session completed", - "agent is composing a response...", - "agent is composing a response…", -]); - -const REPO_BLOCK_PATTERNS = [ - /merge conflict/i, - /unmerged files/i, - /protected branch/i, - /non-fast-forward/i, - /failed to push/i, - /push rejected/i, - /cannot rebase/i, - /pre-push hook/i, - /hook declined/i, - /working tree has changes/i, - /index contains uncommitted changes/i, -]; - -const ENV_BLOCK_PATTERNS = [ - /prompt quality/i, - /missing task (description|url)/i, - /missing tool/i, - /not recognized as an internal or external command/i, - /command not found/i, - /spawn .*enoent/i, - /enoent/i, - /permission denied/i, - /access is denied/i, - /authentication failed/i, - /not authenticated/i, - /missing credentials/i, - /token/i, - /connection refused/i, - /connection reset/i, - /network/i, - /timeout/i, - /sdk unavailable/i, - /failed to list models/i, -]; - -const COMMIT_BLOCK_PATTERNS = [ - /commit blocked/i, - /implementation_done_commit_blocked/i, - /git commit/i, - /git push/i, - /pre-push hook/i, - /hook/i, -]; +const TERMINAL_SESSION_STATUSES = new Set(["completed", "failed", "idle", "archived"]); function isTerminalSessionStatus(status) { return TERMINAL_SESSION_STATUSES.has(String(status || "").trim().toLowerCase()); @@ -136,85 +72,8 @@ function resolveSessionTrackerPersistDir(options = {}) { export const _test = Object.freeze({ resolveSessionTrackerSourceRepoRoot, resolveSessionTrackerPersistDir, - deriveTerminalSessionStatus, }); -function normalizeSessionStatus(status, fallback = "completed") { - const normalized = String(status || "").trim().toLowerCase(); - return normalized || 
fallback; -} - -function getSessionMessageText(message) { - return String(message?.content || message?.summary || "").trim(); -} - -function hasMeaningfulSessionOutput(session) { - const messages = Array.isArray(session?.messages) ? session.messages : []; - return messages.some((message) => { - const text = getSessionMessageText(message); - if (!text) return false; - const normalized = text.replace(/\s+/g, " ").trim().toLowerCase(); - if (!normalized || SESSION_PLACEHOLDER_OUTPUTS.has(normalized)) return false; - const messageType = String(message?.type || "").trim().toLowerCase(); - const messageRole = String(message?.role || "").trim().toLowerCase(); - if (messageType === "agent_message" || messageType === "assistant" || messageRole === "assistant") { - return true; - } - if (message?.type === "tool_call" || message?.type === "tool_result") return true; - if (message?.type === "error") return true; - return /edit|write|create|patch|commit|push|search|diff|test/i.test(text); - }); -} - -function classifyBlockedSessionText(text) { - const normalized = String(text || "").replace(/\s+/g, " ").trim(); - if (!normalized) return null; - if (COMMIT_BLOCK_PATTERNS.some((pattern) => pattern.test(normalized))) { - return "implementation_done_commit_blocked"; - } - if (REPO_BLOCK_PATTERNS.some((pattern) => pattern.test(normalized))) { - return "blocked_by_repo"; - } - if (ENV_BLOCK_PATTERNS.some((pattern) => pattern.test(normalized))) { - return "blocked_by_env"; - } - return null; -} - -function deriveIdleTerminalSessionStatus(session) { - return hasMeaningfulSessionOutput(session) ? 
"stalled" : "no_output"; -} - -function deriveTerminalSessionStatus(session, requestedStatus = "completed") { - const normalizedRequested = normalizeSessionStatus(requestedStatus); - if ( - normalizedRequested !== "completed" && - normalizedRequested !== "idle" && - normalizedRequested !== "active" - ) { - return normalizedRequested; - } - - if (normalizedRequested === "idle" || normalizedRequested === "active") { - return deriveIdleTerminalSessionStatus(session); - } - - const messages = Array.isArray(session?.messages) ? session.messages : []; - if (!messages.length || !hasMeaningfulSessionOutput(session)) { - return "no_output"; - } - - const recentText = messages - .slice(-8) - .map((message) => getSessionMessageText(message)) - .filter(Boolean) - .join("\n"); - const blockedStatus = classifyBlockedSessionText(recentText); - if (blockedStatus) return blockedStatus; - - return "completed"; -} - function resolveSessionMaxMessages(type, metadata, explicitMax, fallbackMax) { if (Number.isFinite(explicitMax)) { return explicitMax > 0 ? 
explicitMax : 0; @@ -229,52 +88,6 @@ function resolveSessionMaxMessages(type, metadata, explicitMax, fallbackMax) { return fallbackMax; } -function buildSessionRecordFromPersistedData(data, idleThresholdMs) { - if (!data || typeof data !== "object") return null; - const id = String(data.id || data.taskId || "").trim(); - if (!id) return null; - - const createdAt = String(data.createdAt || "").trim() || new Date().toISOString(); - const lastActiveAt = String(data.lastActiveAt || data.updatedAt || "").trim() || createdAt; - const lastActiveMs = Date.parse(lastActiveAt) || Date.parse(createdAt) || Date.now(); - - let status = normalizeSessionStatus(data.status || "completed"); - let endedAt = data.endedAt || null; - if (status === "active" && lastActiveMs > 0) { - const ageMs = Date.now() - lastActiveMs; - if (ageMs > idleThresholdMs) { - status = deriveIdleTerminalSessionStatus(data); - endedAt = endedAt || lastActiveMs; - } - } - - const messages = Array.isArray(data.messages) ? data.messages : []; - return { - id, - taskId: data.taskId || id, - taskTitle: data.taskTitle || data.title || data.metadata?.title || id, - sessionKey: - String(data.sessionKey || "").trim() || - `${data.taskId || id}:${data.startedAt || lastActiveMs}:${endedAt || data.startedAt || lastActiveMs}`, - type: data.type || "task", - status, - createdAt, - lastActiveAt, - startedAt: data.startedAt || (createdAt ? new Date(createdAt).getTime() : lastActiveMs), - endedAt, - messages, - totalEvents: messages.length, - turnCount: data.turnCount || 0, - turns: Array.isArray(data.turns) ? 
data.turns : [], - accumulatedAt: data.accumulatedAt || null, - lastActivityAt: lastActiveMs, - metadata: data.metadata || {}, - insights: data.insights || buildSessionInsights({ messages }), - trajectory: data.trajectory || { version: 1, replayable: true, steps: [] }, - summary: data.summary || null, - }; -} - // ── Message Types ─────────────────────────────────────────────────────────── /** @@ -293,125 +106,12 @@ function buildSessionRecordFromPersistedData(data, idleThresholdMs) { * @property {number|null} endedAt * @property {SessionMessage[]} messages * @property {number} totalEvents - Total events received (before truncation) - * @property {Array} [turns] - Per-turn rollup timeline * @property {string} status - "active"|"completed"|"idle"|"failed" * @property {number} lastActivityAt - Timestamp of last event */ -function parseTimestampMs(value, fallback = Date.now()) { - const parsed = Date.parse(String(value || "")); - return Number.isFinite(parsed) ? parsed : fallback; -} - -function normalizeTokenNumber(value) { - const num = Number(value); - return Number.isFinite(num) && num >= 0 ? Math.round(num) : 0; -} - -function extractUsageFromMeta(meta) { - if (!meta || typeof meta !== "object") return null; - const usage = meta.usage && typeof meta.usage === "object" ? meta.usage : meta; - const inputTokens = normalizeTokenNumber( - usage.inputTokens ?? usage.prompt_tokens ?? usage.promptTokens ?? usage.input_tokens, - ); - const outputTokens = normalizeTokenNumber( - usage.outputTokens ?? usage.completion_tokens ?? usage.completionTokens ?? usage.output_tokens, - ); - const totalTokens = normalizeTokenNumber( - usage.totalTokens ?? usage.total_tokens ?? 
(inputTokens + outputTokens), - ); - if (inputTokens <= 0 && outputTokens <= 0 && totalTokens <= 0) return null; - return { inputTokens, outputTokens, totalTokens }; -} - -function ensureSessionTurns(session) { - if (!Array.isArray(session.turns)) session.turns = []; - return session.turns; -} - -function getOrCreateTurnEntry(session, turnIndex, timestamp) { - const turns = ensureSessionTurns(session); - const safeTurnIndex = Number.isFinite(Number(turnIndex)) ? Number(turnIndex) : Math.max(0, turns.length); - let entry = turns.find((turn) => turn?.turnIndex === safeTurnIndex); - if (!entry) { - entry = { - turnIndex: safeTurnIndex, - startedAt: timestamp, - endedAt: timestamp, - durationMs: 0, - inputTokens: 0, - outputTokens: 0, - totalTokens: 0, - userMessageId: null, - assistantMessageId: null, - status: "pending", - }; - turns.push(entry); - turns.sort((a, b) => Number(a?.turnIndex || 0) - Number(b?.turnIndex || 0)); - } - return entry; -} - -function updateTurnTimeline(session, msg) { - if (!session || !msg) return; - const timestampMs = parseTimestampMs(msg.timestamp, session.lastActivityAt || Date.now()); - const derivedTurnIndex = Number.isFinite(Number(msg.turnIndex)) - ? Number(msg.turnIndex) - : Math.max(0, Number(session.turnCount || 0) - (String(msg.role || "").toLowerCase() === "assistant" ? 
1 : 0)); - const turnIndex = derivedTurnIndex; - const turn = getOrCreateTurnEntry(session, turnIndex, timestampMs); - turn.startedAt = Math.min(Number(turn.startedAt || timestampMs), timestampMs); - turn.endedAt = Math.max(Number(turn.endedAt || timestampMs), timestampMs); - - const role = String(msg.role || "").toLowerCase(); - if (role === "user") { - turn.userMessageId = msg.id || turn.userMessageId; - turn.startedAt = timestampMs; - if (turn.status === "pending") turn.status = "in_progress"; - } - if (role === "assistant") { - turn.assistantMessageId = msg.id || turn.assistantMessageId; - turn.endedAt = timestampMs; - turn.status = "completed"; - } - - const usage = extractUsageFromMeta(msg.meta); - if (usage) { - turn.inputTokens = Math.max(turn.inputTokens || 0, usage.inputTokens || 0); - turn.outputTokens = Math.max(turn.outputTokens || 0, usage.outputTokens || 0); - turn.totalTokens = Math.max(turn.totalTokens || 0, usage.totalTokens || 0); - if (!session.insights || typeof session.insights !== "object") { - session.insights = {}; - } - const priorUsage = extractUsageFromMeta(session.insights.tokenUsage) || { - inputTokens: 0, - outputTokens: 0, - totalTokens: 0, - }; - session.insights.tokenUsage = { - inputTokens: Math.max(priorUsage.inputTokens, usage.inputTokens || 0), - outputTokens: Math.max(priorUsage.outputTokens, usage.outputTokens || 0), - totalTokens: Math.max(priorUsage.totalTokens, usage.totalTokens || 0), - }; - } - turn.durationMs = Math.max(0, Number(turn.endedAt || timestampMs) - Number(turn.startedAt || timestampMs)); - const derivedInsights = buildSessionInsights({ - ...session, - insights: null, - messages: Array.isArray(session.messages) ? session.messages : [], - }); - session.insights = { - ...(session.insights && typeof session.insights === "object" ? session.insights : {}), - ...derivedInsights, - turnTimeline: derivedInsights.turnTimeline, - turns: derivedInsights.turns, - }; -} - /** Debounce interval for disk writes (ms). 
*/ const FLUSH_INTERVAL_MS = 2000; -const DERIVED_STATE_REFRESH_MS = 250; -const PERSISTED_SESSION_LIST_CACHE_TTL_MS = 5000; const SESSION_EVENT_LISTENERS = new Set(); const SESSION_STATE_LISTENERS = new Set(); @@ -441,9 +141,6 @@ function emitSessionEvent(session, message) { status: session.status || "active", lastActiveAt: session.lastActiveAt || new Date().toISOString(), turnCount: session.turnCount || 0, - turns: Array.isArray(session.turns) - ? session.turns.map((turn) => ({ ...turn })) - : [], }, }; for (const listener of SESSION_EVENT_LISTENERS) { @@ -469,9 +166,6 @@ function emitSessionStateEvent(session, reason, extra = {}) { status: session.status || "active", lastActiveAt: session.lastActiveAt || new Date().toISOString(), turnCount: session.turnCount || 0, - turns: Array.isArray(session.turns) - ? session.turns.map((turn) => ({ ...turn })) - : [], title: session.taskTitle || session.title || null, }, event: { @@ -507,18 +201,12 @@ export class SessionTracker { /** @type {Set} session IDs with pending disk writes */ #dirty = new Set(); - /** @type {{ loadedAt: number, sessions: Array }} */ - #persistedSummaryCache = { loadedAt: 0, sessions: [] }; - /** @type {ReturnType|null} */ #flushTimer = null; /** @type {ReturnType|null} */ #reaperTimer = null; - /** @type {Map>} */ - #derivedRefreshTimers = new Map(); - /** * @param {Object} [options] * @param {number} [options.maxMessages=10] @@ -533,6 +221,7 @@ export class SessionTracker { if (this.#persistDir) { this.#ensureDir(); this.#loadFromDisk(); + this.#purgeExcessFiles(); this.#flushTimer = setInterval(() => this.#flushDirty(), FLUSH_INTERVAL_MS); if (this.#flushTimer.unref) this.#flushTimer.unref(); } @@ -570,7 +259,6 @@ export class SessionTracker { messages: [], totalEvents: 0, turnCount: 0, - turns: [], status: "active", accumulatedAt: null, lastActivityAt: Date.now(), @@ -630,12 +318,11 @@ export class SessionTracker { timestamp: new Date().toISOString(), }; session.messages.push(msg); - 
updateTurnTimeline(session, msg); if (Number.isFinite(maxMessages) && maxMessages > 0) { while (session.messages.length > maxMessages) session.messages.shift(); } this.#appendTrajectoryStep(session, event); - this.#scheduleDerivedStateRefresh(session); + this.#refreshDerivedState(session); this.#markDirty(taskId); emitSessionEvent(session, msg); return; @@ -644,8 +331,6 @@ export class SessionTracker { // Direct message format (role/content) if (event && event.role && event.content !== undefined) { markActivity(); - const role = String(event.role || "").toLowerCase(); - const isAssistantTurn = role === "assistant"; const msg = { id: event.id || `msg-${Date.now()}-${randomToken(6)}`, type: event.type || undefined, @@ -665,25 +350,19 @@ export class SessionTracker { : undefined, _cachedLogId: event._cachedLogId || undefined, }; - if (isAssistantTurn) { - session.turnCount += 1; - } + session.turnCount++; session.messages.push(msg); - updateTurnTimeline(session, msg); if (Number.isFinite(maxMessages) && maxMessages > 0) { while (session.messages.length > maxMessages) session.messages.shift(); } this.#appendTrajectoryStep(session, event); - this.#scheduleDerivedStateRefresh(session); + this.#refreshDerivedState(session); this.#markDirty(taskId); emitSessionEvent(session, msg); return; } const msg = this.#normalizeEvent(event); - if (msg && !Number.isFinite(Number(msg.turnIndex))) { - msg.turnIndex = session.turnCount || 0; - } if (!msg) { return; // Ignore low-signal events that should not mask idle/stalled sessions } @@ -691,40 +370,30 @@ export class SessionTracker { markActivity(); // Push to ring buffer (keep only last N) session.messages.push(msg); - updateTurnTimeline(session, msg); if (Number.isFinite(maxMessages) && maxMessages > 0) { while (session.messages.length > maxMessages) session.messages.shift(); } this.#appendTrajectoryStep(session, event); - this.#scheduleDerivedStateRefresh(session); + this.#refreshDerivedState(session); this.#markDirty(taskId); 
emitSessionEvent(session, msg); } - /** - * Backward-compatible alias for older callers/tests. - * @param {string} taskId - * @param {Object|string} event - */ - appendEvent(taskId, event) { - return this.recordEvent(taskId, event); - } - /** * Mark a session as completed. * @param {string} taskId - * @param {string} [status="completed"] + * @param {"completed"|"failed"|"idle"} [status="completed"] */ endSession(taskId, status = "completed") { const session = this.#sessions.get(taskId); if (!session) return; session.endedAt = Date.now(); - session.status = deriveTerminalSessionStatus(session, status); - this.#scheduleDerivedStateRefresh(session, { force: true }); + session.status = status; + this.#refreshDerivedState(session); this.#accumulateCompletedSession(session, taskId); this.#markDirty(taskId); - emitSessionStateEvent(session, "session-ended", { status: session.status }); + emitSessionStateEvent(session, "session-ended", { status }); } /** @@ -892,7 +561,6 @@ export class SessionTracker { this.#accumulateCompletedSession(session, taskId); this.#sessions.delete(taskId); this.#dirty.delete(taskId); - this.#invalidatePersistedSummaryCache(); // Remove persisted session file if it exists if (this.#persistDir) { try { @@ -951,7 +619,6 @@ export class SessionTracker { messages: [], totalEvents: 0, turnCount: 0, - turns: [], lastActivityAt: Date.now(), accumulatedAt: null, metadata, @@ -970,44 +637,20 @@ export class SessionTracker { /** * List all sessions (metadata only, no full messages). * Sorted by lastActiveAt descending. - * @param {{ includePersisted?: boolean }} [options] * @returns {Array} */ - listAllSessions(options = {}) { - const includePersisted = options.includePersisted !== false; - const byId = new Map(); - const addSummary = (s, options = {}) => { - if (!s) return; - const sessionId = s.id || s.taskId; - const includeRuntimeProgress = options.includeRuntimeProgress !== false; - const progress = includeRuntimeProgress && s.status === "active" - ? 
this.getProgressStatus(sessionId) + listAllSessions() { + const list = []; + for (const s of this.#sessions.values()) { + const progress = s.status === "active" + ? this.getProgressStatus(s.id || s.taskId) : null; const derivedStatus = progress?.status === "ended" ? "completed" : (progress?.status || s.status); const lastActiveAt = s.lastActiveAt || new Date(s.lastActivityAt).toISOString(); - const turns = Array.isArray(s.turns) - ? s.turns.map((turn) => ({ ...turn })) - : (Array.isArray(s.insights?.turnTimeline) - ? s.insights.turnTimeline.map((turn) => ({ ...turn })) - : []); - const turnTokenUsage = turns.reduce((acc, turn) => ({ - inputTokens: acc.inputTokens + (Number(turn?.inputTokens) || 0), - outputTokens: acc.outputTokens + (Number(turn?.outputTokens) || 0), - totalTokens: acc.totalTokens + (Number(turn?.totalTokens) || 0), - }), { inputTokens: 0, outputTokens: 0, totalTokens: 0 }); - const tokenUsage = s.insights?.tokenUsage - || (turnTokenUsage.totalTokens > 0 || turnTokenUsage.inputTokens > 0 || turnTokenUsage.outputTokens > 0 - ? turnTokenUsage - : null); - const effectiveTokenUsage = extractUsageFromMeta(tokenUsage) || { - inputTokens: 0, - outputTokens: 0, - totalTokens: 0, - }; - byId.set(sessionId, { - id: sessionId, + list.push({ + id: s.id || s.taskId, taskId: s.taskId, title: s.taskTitle || s.title || null, type: s.type || "task", @@ -1020,11 +663,6 @@ export class SessionTracker { workspaceDir: String(s?.metadata?.workspaceDir || "").trim() || null, branch: String(s?.metadata?.branch || "").trim() || null, turnCount: s.turnCount || 0, - turns, - tokenCount: effectiveTokenUsage?.totalTokens || 0, - inputTokens: effectiveTokenUsage?.inputTokens || 0, - outputTokens: effectiveTokenUsage?.outputTokens || 0, - tokenUsage: effectiveTokenUsage, createdAt: s.createdAt || new Date(s.startedAt).toISOString(), lastActiveAt, idleMs: progress?.idleMs ?? 
0, @@ -1032,32 +670,9 @@ export class SessionTracker { recommendation: progress?.recommendation || "none", preview: this.#lastMessagePreview(s), lastMessage: this.#lastMessagePreview(s), - totalTokens: Number(tokenUsage?.totalTokens || 0), - inputTokens: Number(tokenUsage?.inputTokens || 0), - outputTokens: Number(tokenUsage?.outputTokens || 0), insights: s.insights || null, }); - }; - - for (const s of this.#sessions.values()) { - addSummary(s); - } - - if (includePersisted) { - for (const persisted of this.#readPersistedSessionSummaries()) { - if (!persisted) continue; - const sessionId = persisted.id || persisted.taskId; - if (!sessionId || byId.has(sessionId)) continue; - byId.set(sessionId, { - ...persisted, - turns: Array.isArray(persisted.turns) - ? persisted.turns.map((turn) => ({ ...turn })) - : [], - }); - } } - - const list = [...byId.values()]; list.sort((a, b) => (b.lastActiveAt || "").localeCompare(a.lastActiveAt || "")); return list; } @@ -1068,25 +683,7 @@ export class SessionTracker { * @returns {Object|null} */ getSessionMessages(sessionId) { - let session = this.#sessions.get(sessionId); - if (!session && this.#persistDir) { - try { - const filePath = this.#sessionFilePath(sessionId); - if (existsSync(filePath)) { - const raw = readFileSync(filePath, "utf8"); - const restored = buildSessionRecordFromPersistedData(JSON.parse(raw || "{}"), this.#idleThresholdMs); - if (restored) { - if (this.#sessions.size >= MAX_SESSIONS && !this.#sessions.has(restored.id)) { - this.#evictOldest(); - } - this.#sessions.set(restored.id, restored); - session = restored; - } - } - } catch { - session = null; - } - } + const session = this.#sessions.get(sessionId); if (!session) return null; return { ...session }; } @@ -1108,16 +705,14 @@ export class SessionTracker { updateSessionStatus(sessionId, status) { const session = this.#sessions.get(sessionId); if (!session) return; - session.status = isTerminalSessionStatus(status) - ? 
deriveTerminalSessionStatus(session, status) - : normalizeSessionStatus(status, "active"); - if (isTerminalSessionStatus(session.status)) { + session.status = status; + if (status === "completed" || status === "archived" || status === "failed" || status === "idle") { session.endedAt = Date.now(); } - this.#scheduleDerivedStateRefresh(session, { force: true }); + this.#refreshDerivedState(session); this.#accumulateCompletedSession(session, sessionId); this.#markDirty(sessionId); - emitSessionStateEvent(session, "session-status", { status: session.status }); + emitSessionStateEvent(session, "session-status", { status }); } /** @@ -1196,7 +791,7 @@ export class SessionTracker { target.editedAt = new Date().toISOString(); session.lastActivityAt = Date.now(); session.lastActiveAt = new Date().toISOString(); - this.#scheduleDerivedStateRefresh(session, { force: true }); + this.#refreshDerivedState(session); this.#markDirty(sessionId); return { ok: true, message: { ...target }, index: idx }; @@ -1228,12 +823,6 @@ export class SessionTracker { clearInterval(this.#reaperTimer); this.#reaperTimer = null; } - for (const [sessionId, timer] of this.#derivedRefreshTimers.entries()) { - clearTimeout(timer); - this.#derivedRefreshTimers.delete(sessionId); - const session = this.#sessions.get(sessionId); - if (session) this.#refreshDerivedState(session); - } this.#flushDirty(); } @@ -1245,7 +834,6 @@ export class SessionTracker { refreshFromDisk() { if (!this.#persistDir) return; this.#ensureDir(); - this.#invalidatePersistedSummaryCache(); let files = []; try { files = readdirSync(this.#persistDir).filter((f) => f.endsWith(".json")); @@ -1285,10 +873,35 @@ export class SessionTracker { const available = MAX_SESSIONS - this.#sessions.size; const toLoad = parsed.slice(0, Math.max(0, available)); - for (const { data } of toLoad) { - const restored = buildSessionRecordFromPersistedData(data, this.#idleThresholdMs); - if (!restored) continue; - this.#sessions.set(restored.id, 
restored); + for (const { data, lastActive } of toLoad) { + const sessionId = String(data.id || data.taskId || "").trim(); + // Heal stale "active" sessions + let status = data.status || "completed"; + let endedAt = data.endedAt || null; + if (status === "active" && lastActive > 0) { + const ageMs = Date.now() - lastActive; + if (ageMs > this.#idleThresholdMs) { + status = "completed"; + endedAt = endedAt || lastActive; + } + } + this.#sessions.set(sessionId, { + taskId: data.taskId || sessionId, + taskTitle: data.title || data.taskTitle || null, + id: sessionId, + type: data.type || "task", + startedAt: Date.parse(data.createdAt || "") || Date.now(), + createdAt: data.createdAt || new Date().toISOString(), + lastActiveAt: data.lastActiveAt || data.updatedAt || new Date().toISOString(), + endedAt, + messages: Array.isArray(data.messages) ? data.messages : [], + totalEvents: Array.isArray(data.messages) ? data.messages.length : 0, + turnCount: data.turnCount || 0, + status, + lastActivityAt: lastActive || Date.now(), + metadata: data.metadata || {}, + insights: data.insights || buildSessionInsights({ messages: data.messages || [] }), + }); } } @@ -1339,7 +952,7 @@ export class SessionTracker { if (session.status !== "active") continue; const idleMs = now - (session.lastActivityAt || session.startedAt || now); if (idleMs > this.#idleThresholdMs) { - session.status = deriveIdleTerminalSessionStatus(session); + session.status = "completed"; session.endedAt = now; this.#refreshDerivedState(session); this.#accumulateCompletedSession(session, id); @@ -1380,16 +993,7 @@ export class SessionTracker { const startedAt = Number.isFinite(Number(session.startedAt)) ? Number(session.startedAt) : endedAt; - const turns = Array.isArray(session.turns) ? 
session.turns : []; - const turnTokenUsage = turns.reduce((acc, turn) => ({ - inputTokens: acc.inputTokens + (Number(turn?.inputTokens) || 0), - outputTokens: acc.outputTokens + (Number(turn?.outputTokens) || 0), - totalTokens: acc.totalTokens + (Number(turn?.totalTokens) || 0), - }), { inputTokens: 0, outputTokens: 0, totalTokens: 0 }); - const tokenUsage = session.insights?.tokenUsage - || (turnTokenUsage.totalTokens > 0 || turnTokenUsage.inputTokens > 0 || turnTokenUsage.outputTokens > 0 - ? turnTokenUsage - : null); + const tokenUsage = session.insights?.tokenUsage || null; addCompletedSession({ id: session.id || taskId, @@ -1402,8 +1006,6 @@ export class SessionTracker { startedAt, endedAt, durationMs: Math.max(0, endedAt - startedAt), - turnCount: session.turnCount || 0, - turns: turns.map((turn) => ({ ...turn })), tokenCount: tokenUsage?.totalTokens || 0, inputTokens: tokenUsage?.inputTokens || 0, outputTokens: tokenUsage?.outputTokens || 0, @@ -1428,7 +1030,6 @@ export class SessionTracker { #extractTrajectoryStep(event, session) { const ts = new Date().toISOString(); - const eventTimestamp = String(event?.timestamp || event?.item?.timestamp || "").trim() || ts; const id = `step-${Date.now()}-${randomToken(6)}`; // String event @@ -1449,13 +1050,13 @@ export class SessionTracker { if (event?.type === "item.started" && event?.item) { const item = event.item; if (item.type === "command_execution") { - return { id, kind: "tool_call", summary: `Ran ${item.command || "unknown"}`, timestamp: eventTimestamp }; + return { id, kind: "tool_call", summary: `Ran ${item.command || "unknown"}`, timestamp: ts }; } if (item.type === "reasoning") { - return { id, kind: "reasoning", summary: item.text || "", timestamp: eventTimestamp }; + return { id, kind: "reasoning", summary: item.text || "", timestamp: ts }; } if (item.type === "function_call" || item.type === "mcp_tool_call") { - return { id, kind: "tool_call", summary: `${item.name || "call"} ${item.arguments || 
""}`.trim(), timestamp: eventTimestamp }; + return { id, kind: "tool_call", summary: `${item.name || "call"} ${item.arguments || ""}`.trim(), timestamp: ts }; } return null; } @@ -1464,10 +1065,10 @@ export class SessionTracker { if (event?.type === "item.completed" && event?.item) { const item = event.item; if (item.type === "reasoning") { - return { id, kind: "reasoning", summary: item.text || "", timestamp: eventTimestamp }; + return { id, kind: "reasoning", summary: item.text || "", timestamp: ts }; } if (item.type === "function_call" || item.type === "mcp_tool_call") { - return { id, kind: "tool_call", summary: `${item.name || "call"} ${item.arguments || ""}`.trim(), timestamp: eventTimestamp }; + return { id, kind: "tool_call", summary: `${item.name || "call"} ${item.arguments || ""}`.trim(), timestamp: ts }; } if (item.type === "command_execution") { const cmd = item.command || ""; @@ -1475,12 +1076,12 @@ export class SessionTracker { (s) => s.kind === "tool_call" && s.summary === `Ran ${cmd}`, ); if (hasPriorStart) { - return { id, kind: "tool_result", summary: `${cmd} (exit ${item.exit_code ?? "?"})`, timestamp: eventTimestamp }; + return { id, kind: "tool_result", summary: `${cmd} (exit ${item.exit_code ?? 
"?"})`, timestamp: ts }; } - return { id, kind: "command", summary: cmd, timestamp: eventTimestamp }; + return { id, kind: "command", summary: cmd, timestamp: ts }; } if (item.type === "agent_message") { - return { id, kind: "assistant", summary: item.text || "", timestamp: eventTimestamp }; + return { id, kind: "assistant", summary: item.text || "", timestamp: ts }; } return null; } @@ -1488,7 +1089,7 @@ export class SessionTracker { // Assistant message events if (event?.type === "assistant.message") { const content = event?.data?.content || event?.content || ""; - return { id, kind: "agent_message", summary: content.slice(0, 200), timestamp: eventTimestamp }; + return { id, kind: "agent_message", summary: content.slice(0, 200), timestamp: ts }; } return null; @@ -1501,37 +1102,6 @@ export class SessionTracker { ...session, insights: null, }); - const priorTurns = Array.isArray(session.turns) ? session.turns : []; - const priorTurnsByIndex = new Map( - priorTurns.map((turn) => [Number(turn?.turnIndex || 0), turn]), - ); - session.turns = Array.isArray(session.insights?.turnTimeline) - ? session.insights.turnTimeline.map((turn) => { - const turnIndex = Number(turn?.turnIndex || 0); - const priorTurn = priorTurnsByIndex.get(turnIndex) || {}; - return { - turnIndex, - startedAt: turn?.startedAt ? parseTimestampMs(turn.startedAt) : (priorTurn.startedAt ?? null), - endedAt: turn?.endedAt ? parseTimestampMs(turn.endedAt) : (priorTurn.endedAt ?? 
null), - durationMs: Math.max(0, Number(turn?.durationMs || priorTurn.durationMs || 0)), - inputTokens: Math.max( - normalizeTokenNumber(turn?.inputTokens), - normalizeTokenNumber(priorTurn.inputTokens), - ), - outputTokens: Math.max( - normalizeTokenNumber(turn?.outputTokens), - normalizeTokenNumber(priorTurn.outputTokens), - ), - totalTokens: Math.max( - normalizeTokenNumber(turn?.totalTokens), - normalizeTokenNumber(priorTurn.totalTokens), - ), - userMessageId: priorTurn.userMessageId || null, - assistantMessageId: priorTurn.assistantMessageId || null, - status: priorTurn.status || (turn?.endedAt ? "completed" : "in_progress"), - }; - }) - : priorTurns; } catch { // Inspector insights are best-effort only. } @@ -1553,58 +1123,12 @@ export class SessionTracker { } } - #scheduleDerivedStateRefresh(session, options = {}) { - if (!session) return; - const sessionId = String(session.id || session.taskId || "").trim(); - if (!sessionId) { - this.#refreshDerivedState(session); - return; - } - const force = options.force === true; - const existingTimer = this.#derivedRefreshTimers.get(sessionId); - if (existingTimer) { - clearTimeout(existingTimer); - this.#derivedRefreshTimers.delete(sessionId); - } - if (force) { - this.#refreshDerivedState(session); - session._derivedStateRefreshedAt = Date.now(); - return; - } - const now = Date.now(); - const lastRefreshedAt = Number(session._derivedStateRefreshedAt || 0); - const elapsedMs = now - lastRefreshedAt; - if (elapsedMs >= DERIVED_STATE_REFRESH_MS) { - this.#refreshDerivedState(session); - session._derivedStateRefreshedAt = now; - return; - } - const delayMs = Math.max(0, DERIVED_STATE_REFRESH_MS - elapsedMs); - const timer = setTimeout(() => { - this.#derivedRefreshTimers.delete(sessionId); - const currentSession = this.#sessions.get(sessionId); - if (!currentSession) return; - this.#refreshDerivedState(currentSession); - currentSession._derivedStateRefreshedAt = Date.now(); - }, delayMs); - if (timer.unref) 
timer.unref(); - this.#derivedRefreshTimers.set(sessionId, timer); - } - #ensureDir() { if (this.#persistDir && !existsSync(this.#persistDir)) { mkdirSync(this.#persistDir, { recursive: true }); } } - #safeSessionFileMtime(file) { - try { - return statSync(resolve(this.#persistDir, file)).mtimeMs; - } catch { - return 0; - } - } - #sessionFilePath(sessionId) { // Sanitize sessionId for filesystem safety const safe = String(sessionId).replace(/[^a-zA-Z0-9_\-\.]/g, "_"); @@ -1618,8 +1142,6 @@ export class SessionTracker { const session = this.#sessions.get(sessionId); if (!session) continue; try { - this.#refreshDerivedState(session); - session._derivedStateRefreshedAt = Date.now(); const filePath = this.#sessionFilePath(sessionId); const data = { id: session.id || session.taskId, @@ -1635,7 +1157,6 @@ export class SessionTracker { endedAt: session.endedAt || null, accumulatedAt: session.accumulatedAt || null, turnCount: session.turnCount || 0, - turns: Array.isArray(session.turns) ? session.turns : [], messages: session.messages || [], metadata: session.metadata || {}, insights: session.insights || null, @@ -1648,7 +1169,6 @@ export class SessionTracker { } } this.#dirty.clear(); - this.#invalidatePersistedSummaryCache(); } /** @type {Set} filenames loaded during #loadFromDisk (for purge) */ @@ -1659,129 +1179,81 @@ export class SessionTracker { try { const files = readdirSync(this.#persistDir).filter((f) => f.endsWith(".json")); - // Keep startup bounded by loading only the newest session files into memory. - // Older sessions remain on disk and are listed/lazy-loaded on demand. 
- const recentFiles = files - .map((file) => ({ file, mtimeMs: Number(this.#safeSessionFileMtime(file)) || 0 })) - .sort((a, b) => b.mtimeMs - a.mtimeMs) - .slice(0, MAX_SESSIONS); - - for (const { file } of recentFiles) { + // Pre-parse all session files with their timestamps for sorting + /** @type {Array<{file: string, data: Object, lastActive: number}>} */ + const parsed = []; + for (const file of files) { try { const raw = readFileSync(resolve(this.#persistDir, file), "utf8"); const data = JSON.parse(raw); if (!data.id && !data.taskId) continue; - const restored = buildSessionRecordFromPersistedData(data, this.#idleThresholdMs); - if (!restored) continue; - const id = restored.id; - if (this.#sessions.has(id)) continue; - this.#sessions.set(id, restored); - // Skip completed-session accumulation during startup hydration to keep disk-backed reloads fast. - // Sessions are still available for listing and lazy message reads from disk. + const id = data.id || data.taskId; + if (this.#sessions.has(id)) continue; // don't overwrite in-memory + const lastActive = data.lastActiveAt + ? new Date(data.lastActiveAt).getTime() + : data.createdAt + ? 
new Date(data.createdAt).getTime() + : 0; + parsed.push({ file, data, lastActive }); } catch { // Skip corrupt files } } - } catch { - // Directory read failed — proceed without disk data - } - this.#invalidatePersistedSummaryCache(); - } - - #invalidatePersistedSummaryCache() { - this.#persistedSummaryCache = { loadedAt: 0, sessions: [] }; - } - - #readPersistedSessionSummaries() { - if (!this.#persistDir || !existsSync(this.#persistDir)) { - return []; - } - const now = Date.now(); - if ( - Array.isArray(this.#persistedSummaryCache.sessions) && - now - Number(this.#persistedSummaryCache.loadedAt || 0) < - PERSISTED_SESSION_LIST_CACHE_TTL_MS - ) { - return this.#persistedSummaryCache.sessions; - } + // Sort by lastActive descending (newest first) and keep only MAX_SESSIONS + parsed.sort((a, b) => b.lastActive - a.lastActive); + const toLoad = parsed.slice(0, MAX_SESSIONS); + + // Track which files were loaded so #purgeExcessFiles can remove the rest + this.#loadedFiles = new Set(toLoad.map((p) => p.file)); + + for (const { data, lastActive } of toLoad) { + const id = data.id || data.taskId; + // Heal stale "active" sessions — if restored from disk and the last + // activity was more than idleThresholdMs ago, mark as completed. 
+ let status = data.status || "completed"; + let endedAt = data.endedAt || null; + if (status === "active" && lastActive > 0) { + const ageMs = Date.now() - lastActive; + if (ageMs > this.#idleThresholdMs) { + status = "completed"; + endedAt = endedAt || lastActive; + } + } - const sessions = []; - try { - const files = readdirSync(this.#persistDir).filter((f) => f.endsWith(".json")); - for (const file of files) { - try { - const raw = readFileSync(resolve(this.#persistDir, file), "utf8"); - const restored = buildSessionRecordFromPersistedData( - JSON.parse(raw || "{}"), - this.#idleThresholdMs, - ); - if (!restored) continue; - const sessionId = restored.id || restored.taskId; - const lastActiveAt = - restored.lastActiveAt || new Date(restored.lastActivityAt).toISOString(); - const turns = Array.isArray(restored.turns) - ? restored.turns.map((turn) => ({ ...turn })) - : []; - const turnTokenUsage = turns.reduce((acc, turn) => ({ - inputTokens: acc.inputTokens + (Number(turn?.inputTokens) || 0), - outputTokens: acc.outputTokens + (Number(turn?.outputTokens) || 0), - totalTokens: acc.totalTokens + (Number(turn?.totalTokens) || 0), - }), { inputTokens: 0, outputTokens: 0, totalTokens: 0 }); - const tokenUsage = restored.insights?.tokenUsage - || (turnTokenUsage.totalTokens > 0 || turnTokenUsage.inputTokens > 0 || turnTokenUsage.outputTokens > 0 - ? 
turnTokenUsage - : null); - const effectiveTokenUsage = extractUsageFromMeta(tokenUsage) || { - inputTokens: 0, - outputTokens: 0, - totalTokens: 0, - }; - sessions.push({ - id: sessionId, - taskId: restored.taskId, - title: restored.taskTitle || restored.title || null, - type: restored.type || "task", - status: restored.status || "completed", - lifecycleStatus: restored.status || "completed", - runtimeState: null, - runtimeUpdatedAt: lastActiveAt, - runtimeIsLive: false, - workspaceId: String(restored?.metadata?.workspaceId || "").trim() || null, - workspaceDir: String(restored?.metadata?.workspaceDir || "").trim() || null, - branch: String(restored?.metadata?.branch || "").trim() || null, - turnCount: restored.turnCount || 0, - turns, - tokenCount: effectiveTokenUsage.totalTokens || 0, - inputTokens: effectiveTokenUsage.inputTokens || 0, - outputTokens: effectiveTokenUsage.outputTokens || 0, - tokenUsage: effectiveTokenUsage, - createdAt: restored.createdAt || new Date(restored.startedAt).toISOString(), - lastActiveAt, - idleMs: 0, - elapsedMs: Math.max( - 0, - Number(restored.endedAt || Date.now()) - Number(restored.startedAt || Date.now()), - ), - recommendation: "none", - preview: this.#lastMessagePreview(restored), - lastMessage: this.#lastMessagePreview(restored), - insights: restored.insights || null, - }); - } catch { - // Ignore corrupt session files in summary listing + this.#sessions.set(id, { + id, + taskId: data.taskId || id, + taskTitle: data.taskTitle || data.title || data.metadata?.title || id, + sessionKey: + String(data.sessionKey || "").trim() || + `${data.taskId || id}:${data.startedAt || Date.now()}:${endedAt || data.startedAt || Date.now()}`, + type: data.type || "task", + status, + createdAt: data.createdAt || new Date().toISOString(), + lastActiveAt: data.lastActiveAt || new Date().toISOString(), + startedAt: data.startedAt || (data.createdAt ? 
new Date(data.createdAt).getTime() : Date.now()), + endedAt, + messages: data.messages || [], + totalEvents: (data.messages || []).length, + turnCount: data.turnCount || 0, + accumulatedAt: data.accumulatedAt || null, + lastActivityAt: lastActive || Date.now(), + metadata: data.metadata || {}, + insights: data.insights || buildSessionInsights({ messages: data.messages || [] }), + trajectory: data.trajectory || { version: 1, replayable: true, steps: [] }, + summary: data.summary || null, + }); + const restored = this.#sessions.get(id); + if (restored && isTerminalSessionStatus(restored.status) && !restored.accumulatedAt) { + if (this.#accumulateCompletedSession(restored, id)) { + this.#markDirty(id); + } } } } catch { - // Best-effort disk-backed listing only + // Directory read failed — proceed without disk data } - - sessions.sort((a, b) => (b.lastActiveAt || "").localeCompare(a.lastActiveAt || "")); - this.#persistedSummaryCache = { - loadedAt: now, - sessions, - }; - return sessions; } /** @@ -1789,7 +1261,28 @@ export class SessionTracker { * This runs once at startup to clean up historical bloat. 
*/ #purgeExcessFiles() { - this.#loadedFiles.clear(); + if (!this.#persistDir || !existsSync(this.#persistDir)) return; + try { + const files = readdirSync(this.#persistDir).filter((f) => f.endsWith(".json")); + let purged = 0; + for (const file of files) { + if (!this.#loadedFiles.has(file)) { + try { + unlinkSync(resolve(this.#persistDir, file)); + purged++; + } catch { + // best-effort cleanup + } + } + } + if (purged > 0) { + console.log(`${TAG} purged ${purged} excess session file(s) from disk`); + } + // Free the reference — only needed once at startup + this.#loadedFiles.clear(); + } catch { + // best-effort + } } /** @@ -1804,7 +1297,6 @@ export class SessionTracker { if (!event || !event.type) return null; const ts = new Date().toISOString(); - const eventTimestamp = String(event.timestamp || "").trim() || ts; const toText = (value) => { if (value == null) return ""; if (typeof value === "string") return value; @@ -2064,7 +1556,7 @@ ${items.join("\n")}` : "todo updated"; return { type: "system", content: "Turn completed", - timestamp: eventTimestamp, + timestamp: ts, meta: { lifecycle: "turn_completed" }, }; } @@ -2073,7 +1565,7 @@ ${items.join("\n")}` : "todo updated"; return { type: "system", content: "Session completed", - timestamp: eventTimestamp, + timestamp: ts, meta: { lifecycle: "session_completed" }, }; } @@ -2083,7 +1575,7 @@ ${items.join("\n")}` : "todo updated"; return { type: "error", content: `Turn failed: ${detail}`.slice(0, MAX_MESSAGE_CHARS), - timestamp: eventTimestamp, + timestamp: ts, }; } @@ -2091,7 +1583,7 @@ ${items.join("\n")}` : "todo updated"; return { type: "agent_message", content: toText(event.data.content).slice(0, MAX_MESSAGE_CHARS), - timestamp: eventTimestamp, + timestamp: ts, }; } @@ -2099,7 +1591,7 @@ ${items.join("\n")}` : "todo updated"; return { type: "agent_message", content: toText(event.data.deltaContent).slice(0, MAX_MESSAGE_CHARS), - timestamp: eventTimestamp, + timestamp: ts, }; } @@ -2109,7 +1601,7 @@ 
${items.join("\n")}` : "todo updated"; type: "agent_message", content: (typeof event.content === "string" ? event.content : JSON.stringify(event.content)) .slice(0, MAX_MESSAGE_CHARS), - timestamp: eventTimestamp, + timestamp: ts, }; } @@ -2117,7 +1609,7 @@ ${items.join("\n")}` : "todo updated"; return { type: "tool_call", content: `${event.name || event.tool || "tool"}(${(event.arguments || event.input || "").slice(0, 500)})`, - timestamp: eventTimestamp, + timestamp: ts, meta: { toolName: event.name || event.tool }, }; } @@ -2126,7 +1618,7 @@ ${items.join("\n")}` : "todo updated"; return { type: "tool_result", content: (event.output || event.result || "").slice(0, MAX_MESSAGE_CHARS), - timestamp: eventTimestamp, + timestamp: ts, }; } @@ -2135,7 +1627,7 @@ ${items.join("\n")}` : "todo updated"; return { type: "agent_message", content: event.delta.text.slice(0, MAX_MESSAGE_CHARS), - timestamp: eventTimestamp, + timestamp: ts, }; } @@ -2144,7 +1636,7 @@ ${items.join("\n")}` : "todo updated"; return { type: "system", content: `${event.type}${event.delta?.stop_reason ? ` (${event.delta.stop_reason})` : ""}`, - timestamp: eventTimestamp, + timestamp: ts, ...(lifecycle ? 
{ meta: { lifecycle } } : {}), }; } @@ -2154,7 +1646,7 @@ ${items.join("\n")}` : "todo updated"; return { type: "error", content: (event.error?.message || event.message || JSON.stringify(event)).slice(0, MAX_MESSAGE_CHARS), - timestamp: eventTimestamp, + timestamp: ts, }; } @@ -2163,7 +1655,7 @@ ${items.join("\n")}` : "todo updated"; return { type: "system", content: `Voice session started (provider: ${event.provider || "unknown"}, tier: ${event.tier || "?"})`, - timestamp: eventTimestamp, + timestamp: ts, meta: { voiceEvent: "start", provider: event.provider, tier: event.tier }, }; } @@ -2171,7 +1663,7 @@ ${items.join("\n")}` : "todo updated"; return { type: "system", content: `Voice session ended (duration: ${event.duration || 0}s)`, - timestamp: eventTimestamp, + timestamp: ts, meta: { voiceEvent: "end", duration: event.duration }, }; } @@ -2179,7 +1671,7 @@ ${items.join("\n")}` : "todo updated"; return { type: "user", content: (event.text || event.transcript || "").slice(0, MAX_MESSAGE_CHARS), - timestamp: eventTimestamp, + timestamp: ts, meta: { voiceEvent: "transcript" }, }; } @@ -2187,7 +1679,7 @@ ${items.join("\n")}` : "todo updated"; return { type: "agent_message", content: (event.text || event.response || "").slice(0, MAX_MESSAGE_CHARS), - timestamp: eventTimestamp, + timestamp: ts, meta: { voiceEvent: "response" }, }; } @@ -2195,7 +1687,7 @@ ${items.join("\n")}` : "todo updated"; return { type: "tool_call", content: `voice:${event.name || "tool"}(${(event.arguments || "").slice(0, 500)})`, - timestamp: eventTimestamp, + timestamp: ts, meta: { voiceEvent: "tool_call", toolName: event.name }, }; } @@ -2203,7 +1695,7 @@ ${items.join("\n")}` : "todo updated"; return { type: "system", content: `Voice delegated to ${event.executor || "agent"}: ${(event.message || "").slice(0, 500)}`, - timestamp: eventTimestamp, + timestamp: ts, meta: { voiceEvent: "delegate", executor: event.executor }, }; } @@ -2236,11 +1728,10 @@ ${items.join("\n")}` : "todo updated"; /** * 
List all sessions (metadata only). - * @param {{ includePersisted?: boolean }} [options] * @returns {Array} */ -export function listAllSessions(options) { - return getSessionTracker().listAllSessions(options); +export function listAllSessions() { + return getSessionTracker().listAllSessions(); } /** @@ -2261,15 +1752,6 @@ export async function createSession(opts) { return getSessionTracker().createSession(opts); } -/** - * Append an event/message to an existing session. - * @param {string} sessionId - * @param {Object|string} event - */ -export function appendEvent(sessionId, event) { - return getSessionTracker().appendEvent(sessionId, event); -} - /** * Update session status. * @param {string} sessionId diff --git a/infra/tracing.mjs b/infra/tracing.mjs index 3db3839ad..f73bc3738 100644 --- a/infra/tracing.mjs +++ b/infra/tracing.mjs @@ -115,7 +115,6 @@ function createNoopState() { spanKind: null, activeSpans: new Map(), finishedSpans: [], - testSpans: [], }; } @@ -163,9 +162,6 @@ function createLocalSpan(name, attributes = {}, parent = null) { name, traceId, spanId, - traceFlags: Number.isFinite(Number(activeParent?.traceFlags)) - ? 
Number(activeParent.traceFlags) - : 1, parentSpanId: activeParent?.spanId || null, startTime: nowHrTime(), endTime: null, @@ -190,25 +186,22 @@ function attachOtelAttributes(otelSpan, attributes = {}) { function syncSpanContext(span) { const spanContext = span?.otelSpan?.spanContext?.(); if (!spanContext) return; - if (Number.isFinite(Number(spanContext.traceFlags))) { - span.traceFlags = Number(spanContext.traceFlags); - } + span.traceId = spanContext.traceId || span.traceId; + span.spanId = spanContext.spanId || span.spanId; } function finalizeLocalSpan(span) { span.endTime = nowHrTime(); span.durationMs = durationMs(span.startTime); tracingState.activeSpans.delete(span.spanId); - const finishedSpan = { + tracingState.finishedSpans.push({ ...span, attributes: { ...span.attributes }, events: [...span.events], exceptions: [...span.exceptions], links: [...span.links], status: { ...span.status }, - }; - tracingState.finishedSpans.push(finishedSpan); - tracingState.testSpans.push(finishedSpan); + }); } async function loadOtelBindings() { @@ -247,8 +240,8 @@ export async function setupTracing(endpointOrConfig = null) { ? { endpoint: endpointOrConfig } : (endpointOrConfig ?? {}); - const endpoint = process.env.BOSUN_OTEL_ENDPOINT || inputConfig.endpoint || null; - const enabled = endpoint ? true : (inputConfig.enabled ?? false); + const endpoint = inputConfig.endpoint || process.env.BOSUN_OTEL_ENDPOINT || null; + const enabled = inputConfig.enabled ?? Boolean(endpoint); const sampleRate = Number(inputConfig.sampleRate ?? 
1); const exportTimeoutMillis = Math.max( 1, @@ -260,20 +253,7 @@ export async function setupTracing(endpointOrConfig = null) { if (!enabled || !endpoint) { tracingState = createNoopState(); ensureMetricInstruments(); - return { - enabled: false, - endpoint: null, - sampleRate: 0, - serviceName: DEFAULT_SERVICE_NAME, - serviceVersion: DEFAULT_SERVICE_VERSION, - config: { - enabled: false, - endpoint: null, - sampleRate: 0, - serviceName: DEFAULT_SERVICE_NAME, - serviceVersion: DEFAULT_SERVICE_VERSION, - }, - }; + return { enabled: false, endpoint: null, sampleRate: 0 }; } const serviceName = inputConfig.serviceName || DEFAULT_SERVICE_NAME; @@ -328,23 +308,7 @@ export async function setupTracing(endpointOrConfig = null) { tracer = otel.api.trace.getTracer(serviceName, serviceVersion); } catch { sdk = null; - tracer = { - startSpan(name, options = {}) { - const attributes = options?.attributes || {}; - return { - setAttributes() {}, - addEvent() {}, - recordException() {}, - setStatus() {}, - end() {}, - spanContext() { - return { traceId: randomId(TRACE_ID_BYTES), spanId: randomId(SPAN_ID_BYTES) }; - }, - attributes, - name, - }; - }, - }; + tracer = null; api = null; statusCodes = null; spanKind = null; @@ -367,7 +331,6 @@ export async function setupTracing(endpointOrConfig = null) { spanKind, activeSpans: new Map(), finishedSpans: [], - testSpans: [], }; ensureMetricInstruments(); @@ -378,35 +341,10 @@ export async function setupTracing(endpointOrConfig = null) { serviceName, serviceVersion, exporter, - config: { - enabled: true, - endpoint, - sampleRate: resolvedSampleRate, - serviceName, - serviceVersion, - }, }; } export function getTracingState() { - const activeSessions = (tracingState.metrics.gauges.get("bosun.agent.sessions.active") || []).reduce( - (total, entry) => total + Number(entry?.value || 0), - 0, - ); - const taskTokensTotal = (tracingState.metrics.counters.get("bosun.task.tokens.total") || []).reduce( - (total, entry) => total + 
Number(entry?.value || 0), - 0, - ); - const taskCostUsd = (tracingState.metrics.counters.get("bosun.task.cost.usd") || []).reduce( - (total, entry) => total + Number(entry?.value || 0), - 0, - ); - const errorsByType = {}; - for (const entry of tracingState.metrics.counters.get("bosun.agent.errors") || []) { - const type = entry?.attributes?.["bosun.error.type"] || "Error"; - errorsByType[type] = (errorsByType[type] || 0) + Number(entry?.value || 0); - } - return { enabled: tracingState.enabled, endpoint: tracingState.endpoint, @@ -414,35 +352,11 @@ export function getTracingState() { serviceName: tracingState.serviceName, serviceVersion: tracingState.serviceVersion, exporter: tracingState.exporter, - config: { - enabled: tracingState.enabled, - endpoint: tracingState.endpoint, - sampleRate: tracingState.sampleRate, - serviceName: tracingState.serviceName, - serviceVersion: tracingState.serviceVersion, - }, - tracer: tracingState.tracer, - testSpans: [...tracingState.testSpans], - metrics: { - activeSessions, - taskTokensTotal, - taskCostUsd, - errorsByType, - }, }; } export function getFinishedSpans() { - return tracingState.finishedSpans.flatMap((span) => { - if (span?.name !== "bosun.task.execution") { - return [{ ...span }]; - } - // Preserve the canonical span name while keeping legacy read paths stable. 
- return [ - { ...span }, - { ...span, name: "bosun.task.execute" }, - ]; - }); + return [...tracingState.finishedSpans]; } export function getMetricSnapshot() { @@ -460,8 +374,7 @@ export async function shutdownTracing() { ensureMetricInstruments(); } -export async function resetTracingForTests() { - await shutdownSdk(tracingState.sdk); +export function resetTracingForTests() { tracingState = createNoopState(); metricInstruments = null; ensureMetricInstruments(); @@ -477,19 +390,6 @@ export function addSpanEvent(name, attributes = {}) { } } -export function recordAgentEvent(event = {}) { - if (!event || typeof event !== "object") { - return; - } - const eventName = typeof event.type === "string" && event.type.trim() ? event.type.trim() : "agent:event"; - addSpanEvent(eventName, { - "bosun.task.id": event.taskId, - "bosun.agent.id": event.agentId, - "bosun.event.ts": event.ts, - ...omitUndefined(event.payload || {}), - }); -} - export function recordIntervention(type, attributes = {}) { recordMetric("agentInterventions", "counter", 1, { "bosun.intervention.type": type, @@ -600,7 +500,6 @@ export function getCurrentTraceContext() { name: current.name, traceId: current.traceId, spanId: current.spanId, - traceFlags: current.traceFlags, parentSpanId: current.parentSpanId, attributes: { ...current.attributes }, }; @@ -690,13 +589,6 @@ async function withSpan(name, attributes, fn, hooks = {}, options = {}) { }); } hooks.onError?.(span, error); - if (!hooks.onError) { - recordMetric("agentErrors", "counter", 1, { - "bosun.error.type": error?.name || "Error", - "trace.span_id": span.spanId, - "trace.trace_id": span.traceId, - }); - } throw error; } finally { hooks.onFinally?.(span); @@ -758,7 +650,7 @@ export async function traceWorkflowNode(node = {}, fn) { export async function traceTaskExecution(task = {}, fn) { const remoteParent = buildRemoteParentContext(task?.carrier || task?.headers || null); return withSpan( - "bosun.task.execution", + "bosun.task.execute", { 
...resolveSpanAttributes(task), "bosun.task.title": task.title, @@ -849,7 +741,6 @@ export async function traceToolCall(tool = {}, fn) { return withSpan( "bosun.tool.call", { - ...resolveSpanAttributes(tool), "bosun.tool.name": tool.toolName, "bosun.tool.tokens_used": tool.tokensUsed, }, @@ -875,7 +766,7 @@ export async function traceLLMCall(call = {}, fn) { "llm.input_tokens": call.inputTokens, "llm.output_tokens": call.outputTokens, "llm.cost_usd": call.costUsd, - "llm.latency_ms": call.latencyMs ?? call.latency, + "llm.latency_ms": call.latency, }, async (span) => { const startedAt = nowHrTime(); @@ -886,24 +777,12 @@ export async function traceLLMCall(call = {}, fn) { span.attributes["llm.input_tokens"] = inputTokens; span.attributes["llm.output_tokens"] = outputTokens; span.attributes["llm.cost_usd"] = costUsd; - span.attributes["llm.latency_ms"] = Number(result?.latencyMs ?? result?.latency ?? call.latencyMs ?? call.latency ?? durationMs(startedAt)); - const totalTokens = inputTokens + outputTokens; - const currentTaskId = call.taskId || span.attributes["bosun.task.id"]; - const metricAttributes = { - "bosun.task.id": currentTaskId, - "llm.model": call.model, - "trace.span_id": span.spanId, - "trace.trace_id": span.traceId, - }; - if (totalTokens > 0) { - recordMetric("taskTokensTotal", "counter", totalTokens, metricAttributes); - } - if (costUsd > 0) { - recordMetric("taskCostUsd", "counter", costUsd, metricAttributes); - } + span.attributes["llm.latency_ms"] = Number(result?.latency ?? call.latency ?? durationMs(startedAt)); return result; }, ); } ensureMetricInstruments(); + + diff --git a/infra/tui-bridge.mjs b/infra/tui-bridge.mjs index 78518bf58..7149528b0 100644 --- a/infra/tui-bridge.mjs +++ b/infra/tui-bridge.mjs @@ -271,35 +271,17 @@ export function buildMonitorStatsPayload({ agentPool, runtimeStats = {}, uptimeM } export function buildSessionsUpdatePayload(sessions = []) { - return Array.isArray(sessions) - ? 
sessions.map((session) => { - const normalized = session && typeof session === "object" ? { ...session } : {}; - const tokenUsage = normalized?.insights?.tokenUsage || null; - const inputTokens = Number(normalized.inputTokens ?? tokenUsage?.inputTokens ?? 0); - const outputTokens = Number(normalized.outputTokens ?? tokenUsage?.outputTokens ?? 0); - const totalTokens = Number( - normalized.totalTokens ?? normalized.tokenCount ?? tokenUsage?.totalTokens ?? (inputTokens + outputTokens), - ); - normalized.inputTokens = Number.isFinite(inputTokens) ? Math.max(0, Math.round(inputTokens)) : 0; - normalized.outputTokens = Number.isFinite(outputTokens) ? Math.max(0, Math.round(outputTokens)) : 0; - normalized.totalTokens = Number.isFinite(totalTokens) ? Math.max(0, Math.round(totalTokens)) : (normalized.inputTokens + normalized.outputTokens); - normalized.tokenCount = normalized.totalTokens; - return normalized; - }) - : []; + return Array.isArray(sessions) ? sessions.map((session) => ({ ...session })) : []; } export function buildSessionEventPayload(payload = {}) { const event = payload?.event && typeof payload.event === "object" ? { ...payload.event } : { kind: "message", ...(payload?.message ? { message: payload.message } : {}) }; - const session = payload?.session && typeof payload.session === "object" - ? buildSessionsUpdatePayload([payload.session])[0] - : {}; return { - sessionId: String(payload?.sessionId || session?.id || payload?.session?.id || "").trim(), - taskId: String(payload?.taskId || session?.taskId || payload?.session?.taskId || "").trim(), - session, + sessionId: String(payload?.sessionId || payload?.session?.id || "").trim(), + taskId: String(payload?.taskId || payload?.session?.taskId || "").trim(), + session: payload?.session && typeof payload.session === "object" ? 
{ ...payload.session } : {}, event, }; } diff --git a/infra/update-check.mjs b/infra/update-check.mjs index 4db04fde2..577014184 100644 --- a/infra/update-check.mjs +++ b/infra/update-check.mjs @@ -49,31 +49,6 @@ const NPM_LAUNCH_ERROR_CODES = new Set([ "EPERM", "ETXTBSY", ]); -const BUILTIN_AGENT_SKILL_FILES = [ - "agent-coordination.md", - "background-task-execution.md", - "bosun-agent-api.md", - "code-quality-anti-patterns.md", - "commit-conventions.md", - "custom-tool-creation.md", - "error-recovery.md", - "pr-workflow.md", - "skill-codebase-audit.md", - "tdd-pattern.md", -]; - -function isUpdateCheckTestRuntime() { - return Boolean(process.env.VITEST) || - process.env.NODE_ENV === "test" || - Boolean(process.env.JEST_WORKER_ID); -} - -function isSourceCheckoutRuntime(opts = {}) { - if (opts.allowSourceCheckoutAutoUpdate === true) return false; - if (process.env.BOSUN_FORCE_AUTO_UPDATE === "1") return false; - if (isUpdateCheckTestRuntime()) return false; - return existsSync(resolve(__dirname, "..", ".git")); -} function sanitizeNpmEnv(baseEnv = process.env) { const env = { ...baseEnv }; @@ -125,11 +100,7 @@ function quoteCmdArg(arg) { function runWindowsCmd(candidate, args, options) { const cmdLine = [`"${candidate}"`, ...args.map(quoteCmdArg)].join(" "); - const cmdExe = - process.env.ComSpec || - process.env.COMSPEC || - join(process.env.SystemRoot || "C:\\Windows", "System32", "cmd.exe"); - return execFileSync(cmdExe, ["/d", "/s", "/c", cmdLine], options); + return execFileSync("cmd.exe", ["/d", "/s", "/c", cmdLine], options); } function runNpmCommand(args, options = {}) { @@ -519,12 +490,7 @@ export function getCurrentVersion() { } function getRequiredRuntimeFiles() { - const required = [ - resolve(__dirname, "monitor.mjs"), - resolve(__dirname, "..", "agent", "bosun-skills.mjs"), - ...BUILTIN_AGENT_SKILL_FILES.map((file) => - resolve(__dirname, "..", "agent", "skills", file)), - ]; + const required = [resolve(__dirname, "monitor.mjs")]; const copilotDir = 
resolve(__dirname, "..", "node_modules", "@github", "copilot"); if (process.platform === "win32" && existsSync(copilotDir)) { required.push(resolve(copilotDir, "conpty_console_list_agent.js")); @@ -595,12 +561,6 @@ export function startAutoUpdateLoop(opts = {}) { console.log("[auto-update] Disabled via BOSUN_SKIP_AUTO_UPDATE=1"); return; } - if (isSourceCheckoutRuntime(opts)) { - console.log( - "[auto-update] Disabled in source checkout (set BOSUN_FORCE_AUTO_UPDATE=1 to override)", - ); - return; - } const intervalMs = Number(process.env.BOSUN_UPDATE_INTERVAL_MS) || @@ -932,11 +892,8 @@ export const __autoUpdateTestHooks = { resetAutoUpdateState, recordAutoUpdateFailure, isAutoUpdateDisabled, - isSourceCheckoutRuntime, - getRequiredRuntimeFiles, classifyInstallError, buildDisableNotice, - runWindowsCmd, AUTO_UPDATE_STATE_FILE, AUTO_UPDATE_FAILURE_LIMIT, AUTO_UPDATE_DISABLE_WINDOW_MS, diff --git a/infra/worktree-recovery-state.mjs b/infra/worktree-recovery-state.mjs index bb78bd046..df55d4a2f 100644 --- a/infra/worktree-recovery-state.mjs +++ b/infra/worktree-recovery-state.mjs @@ -96,7 +96,10 @@ function buildNextWorktreeRecoveryState(currentState, event) { if (normalizedEvent.outcome === "healthy_noop") { return { ...nextState, - health: state.health === "healthy" ? "healthy" : state.health, + health: + state.health === "recovered" + ? "recovered" + : (state.failureStreak > 0 ? state.health : "healthy"), lastHealthyAt: normalizedEvent.timestamp, }; } diff --git a/lib/integrations-registry.mjs b/lib/integrations-registry.mjs deleted file mode 100644 index bb509148a..000000000 --- a/lib/integrations-registry.mjs +++ /dev/null @@ -1,294 +0,0 @@ -/** - * Integrations Registry — catalog of supported integration types and their field schemas. 
- * - * Each integration definition has: - * id — unique string key - * name — display name - * description — short description - * icon — emoji icon for UI - * fields — array of field descriptors for the secrets form - * docsUrl — optional documentation link - */ - -/** @typedef {{ id: string, label: string, type: "text"|"password"|"url"|"select", required?: boolean, placeholder?: string, options?: string[], helpText?: string }} FieldDef */ -/** @typedef {{ id: string, name: string, description: string, icon: string, fields: FieldDef[], docsUrl?: string }} IntegrationDef */ - -/** @type {IntegrationDef[]} */ -export const INTEGRATIONS = [ - { - id: "github", - name: "GitHub", - description: "GitHub personal access tokens and App credentials", - icon: "🐙", - fields: [ - { - id: "token", - label: "Personal Access Token", - type: "password", - required: false, - placeholder: "ghp_...", - helpText: "Classic or fine-grained PAT with repo scope", - }, - { - id: "appId", - label: "App ID", - type: "text", - required: false, - placeholder: "123456", - helpText: "GitHub App ID (optional, for App-based auth)", - }, - { - id: "privateKey", - label: "App Private Key (PEM)", - type: "password", - required: false, - placeholder: "-----BEGIN RSA PRIVATE KEY-----", - helpText: "GitHub App private key for JWT signing", - }, - { - id: "installationId", - label: "Installation ID", - type: "text", - required: false, - placeholder: "45678901", - helpText: "GitHub App installation ID", - }, - ], - docsUrl: "https://docs.github.com/en/authentication/keeping-your-account-and-data-secure/managing-your-personal-access-tokens", - }, - { - id: "telegram", - name: "Telegram", - description: "Telegram Bot API token", - icon: "✈️", - fields: [ - { - id: "botToken", - label: "Bot Token", - type: "password", - required: true, - placeholder: "1234567890:ABCdef...", - helpText: "From @BotFather — used for sending notifications", - }, - { - id: "chatId", - label: "Default Chat ID", - type: "text", - 
required: false, - placeholder: "-100123456789", - helpText: "Default channel or group chat ID", - }, - ], - docsUrl: "https://core.telegram.org/bots#how-do-i-create-a-bot", - }, - { - id: "azure", - name: "Azure", - description: "Azure service principal or OpenAI endpoint credentials", - icon: "☁️", - fields: [ - { - id: "tenantId", - label: "Tenant ID", - type: "text", - required: false, - placeholder: "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx", - }, - { - id: "clientId", - label: "Client ID", - type: "text", - required: false, - placeholder: "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx", - }, - { - id: "clientSecret", - label: "Client Secret", - type: "password", - required: false, - placeholder: "~xxxxxxxx", - }, - { - id: "openAiEndpoint", - label: "Azure OpenAI Endpoint", - type: "url", - required: false, - placeholder: "https://my-resource.openai.azure.com/", - }, - { - id: "openAiApiKey", - label: "Azure OpenAI API Key", - type: "password", - required: false, - placeholder: "xxxxxxxx...", - }, - ], - docsUrl: "https://learn.microsoft.com/en-us/azure/active-directory/develop/quickstart-register-app", - }, - { - id: "slack", - name: "Slack", - description: "Slack bot or webhook token", - icon: "💬", - fields: [ - { - id: "botToken", - label: "Bot Token", - type: "password", - required: false, - placeholder: "xoxb-...", - helpText: "OAuth bot token with chat:write scope", - }, - { - id: "webhookUrl", - label: "Incoming Webhook URL", - type: "url", - required: false, - placeholder: "https://hooks.slack.com/services/...", - helpText: "Alternative to bot token for simple notifications", - }, - ], - docsUrl: "https://api.slack.com/authentication/token-types", - }, - { - id: "linear", - name: "Linear", - description: "Linear API key for issue tracking", - icon: "📐", - fields: [ - { - id: "apiKey", - label: "API Key", - type: "password", - required: true, - placeholder: "lin_api_...", - helpText: "Personal API key from Linear settings", - }, - ], - docsUrl: 
"https://developers.linear.app/docs/graphql/working-with-the-graphql-api#personal-api-keys", - }, - { - id: "jira", - name: "Jira", - description: "Jira Cloud API token", - icon: "🔷", - fields: [ - { - id: "baseUrl", - label: "Base URL", - type: "url", - required: true, - placeholder: "https://your-org.atlassian.net", - }, - { - id: "email", - label: "Account Email", - type: "text", - required: true, - placeholder: "you@example.com", - }, - { - id: "apiToken", - label: "API Token", - type: "password", - required: true, - placeholder: "ATATT3x...", - helpText: "Generate at id.atlassian.com/manage-profile/security/api-tokens", - }, - ], - docsUrl: "https://support.atlassian.com/atlassian-account/docs/manage-api-tokens-for-your-atlassian-account/", - }, - { - id: "openai", - name: "OpenAI", - description: "OpenAI API key", - icon: "🤖", - fields: [ - { - id: "apiKey", - label: "API Key", - type: "password", - required: true, - placeholder: "sk-...", - }, - { - id: "organization", - label: "Organization ID", - type: "text", - required: false, - placeholder: "org-...", - }, - ], - docsUrl: "https://platform.openai.com/api-keys", - }, - { - id: "anthropic", - name: "Anthropic", - description: "Anthropic Claude API key", - icon: "🧠", - fields: [ - { - id: "apiKey", - label: "API Key", - type: "password", - required: true, - placeholder: "sk-ant-...", - }, - ], - docsUrl: "https://docs.anthropic.com/en/api/getting-started", - }, - { - id: "env", - name: "Environment Variable", - description: "Generic environment variable secret", - icon: "🔑", - fields: [ - { - id: "key", - label: "Variable Name", - type: "text", - required: true, - placeholder: "MY_API_KEY", - }, - { - id: "value", - label: "Value", - type: "password", - required: true, - placeholder: "secret-value", - }, - ], - }, - { - id: "custom", - name: "Custom", - description: "Custom key-value credential set", - icon: "🛠️", - fields: [ - { - id: "key", - label: "Key", - type: "text", - required: true, - placeholder: 
"field-name", - }, - { - id: "value", - label: "Value", - type: "password", - required: true, - placeholder: "secret-value", - }, - ], - }, -]; - -/** @returns {IntegrationDef | undefined} */ -export function getIntegration(id) { - return INTEGRATIONS.find((i) => i.id === id); -} - -/** @returns {string[]} */ -export function getIntegrationIds() { - return INTEGRATIONS.map((i) => i.id); -} diff --git a/lib/mojibake-repair.mjs b/lib/mojibake-repair.mjs deleted file mode 100644 index 31087b5e4..000000000 --- a/lib/mojibake-repair.mjs +++ /dev/null @@ -1,40 +0,0 @@ -const MOJIBAKE_REPLACEMENTS = Object.freeze([ - ["ÔÇö", "—"], - ["ÔåÆ", "→"], - ["ÔǪ", "…"], - ["ÔÇ£", "“"], - ["ÔÇ¥", "”"], - ["ÔÇÖ", "’"], - ["Ôǘ", "‘"], - ["ÔÇ¢", "•"], - ["ÔöÇ", "✓"], - ["—", "—"], - ["–", "–"], - ["…", "…"], - ["“", "“"], - ["â€\u009d", "”"], - ["‘", "‘"], - ["’", "’"], - ["•", "•"], - ["→", "→"], - ["✓", "✓"], -]); - -function repairCommonMojibake(value = "") { - let repaired = String(value ?? ""); - for (const [broken, fixed] of MOJIBAKE_REPLACEMENTS) { - if (!repaired.includes(broken)) continue; - repaired = repaired.split(broken).join(fixed); - } - return repaired; -} - -function detectCommonMojibake(value = "") { - const text = String(value ?? ""); - return MOJIBAKE_REPLACEMENTS.some(([broken]) => text.includes(broken)); -} - -export { - detectCommonMojibake, - repairCommonMojibake, -}; diff --git a/lib/session-insights.mjs b/lib/session-insights.mjs index 835f87738..2673fa076 100644 --- a/lib/session-insights.mjs +++ b/lib/session-insights.mjs @@ -225,110 +225,14 @@ function parseContextBreakdown(text) { function normalizeUsage(value) { if (!value || typeof value !== "object") return null; const input = - Number(value.inputTokens ?? value.input_tokens ?? value.promptTokens ?? value.prompt_tokens ?? value.input ?? value.prompt ?? 0) || 0; + Number(value.input_tokens ?? value.prompt_tokens ?? value.input ?? value.prompt ?? 0) || 0; const output = - Number(value.outputTokens ?? 
value.output_tokens ?? value.completionTokens ?? value.completion_tokens ?? value.output ?? value.completion ?? 0) || 0; - const total = Number(value.totalTokens ?? value.total_tokens ?? value.total ?? input + output) || 0; + Number(value.output_tokens ?? value.completion_tokens ?? value.output ?? value.completion ?? 0) || 0; + const total = Number(value.total_tokens ?? value.total ?? input + output) || 0; if (input <= 0 && output <= 0 && total <= 0) return null; return { input, output, total }; } -function normalizeTokenUsageMeta(meta) { - if (!meta || typeof meta !== "object") return null; - return normalizeUsage( - meta.tokenUsage - || meta.usage - || meta.tokens - || (meta.inputTokens != null || meta.outputTokens != null || meta.totalTokens != null ? meta : null), - ); -} - -function toTimestampMs(value) { - if (value === null || value === undefined || value === "") return null; - const ms = new Date(value).getTime(); - return Number.isFinite(ms) ? ms : null; -} - -function buildTurnTimeline(messages = []) { - const turns = new Map(); - for (const msg of Array.isArray(messages) ? 
messages : []) { - if (!msg || !Number.isFinite(Number(msg.turnIndex))) continue; - const turnIndex = Number(msg.turnIndex); - const timestamp = String(msg.timestamp || ""); - const tsMs = toTimestampMs(timestamp); - const entry = turns.get(turnIndex) || { - turn: turnIndex + 1, - index: turnIndex + 1, - turnIndex, - startedAt: null, - endedAt: null, - durationMs: 0, - inputTokens: 0, - outputTokens: 0, - totalTokens: 0, - tokenCount: 0, - toolCalls: 0, - toolResults: 0, - assistantMessages: 0, - errors: 0, - assistantPreview: "", - preview: "", - }; - if (tsMs !== null) { - const startedMs = toTimestampMs(entry.startedAt); - const endedMs = toTimestampMs(entry.endedAt); - if (startedMs === null || tsMs < startedMs) entry.startedAt = timestamp; - if (endedMs === null || tsMs > endedMs) entry.endedAt = timestamp; - } - const type = String(msg.type || "").toLowerCase(); - const role = String(msg.role || "").toLowerCase(); - if (type === "tool_call" && String(msg?.meta?.lifecycle || "").toLowerCase() !== "started") { - entry.toolCalls += 1; - } - if (type === "tool_result" || type === "tool_output") { - entry.toolResults += 1; - } - if (type === "error" || type === "stream_error") { - entry.errors += 1; - } - const usage = normalizeUsage(msg?.meta?.usage) || normalizeUsage(msg?.usage) || null; - if (usage) { - entry.inputTokens += usage.input; - entry.outputTokens += usage.output; - entry.totalTokens += usage.total; - } - const content = toText(msg.content).replace(/\s+/g, " ").trim(); - if (role === "assistant" || type === "agent_message" || type === "assistant_message") { - entry.assistantMessages += 1; - if (!entry.assistantPreview) { - entry.assistantPreview = content.slice(0, 180); - } - if (!entry.preview) { - entry.preview = content.slice(0, 140); - } - } else if ((type === "error" || type === "stream_error") && !entry.preview) { - entry.preview = content.slice(0, 140); - } - turns.set(turnIndex, entry); - } - return Array.from(turns.values()) - .sort((a, b) => 
a.turnIndex - b.turnIndex) - .map((entry) => { - const startedMs = toTimestampMs(entry.startedAt); - const endedMs = toTimestampMs(entry.endedAt); - return { - ...entry, - durationMs: startedMs !== null && endedMs !== null ? Math.max(0, endedMs - startedMs) : 0, - tokenCount: entry.totalTokens, - tokenUsage: { - inputTokens: entry.inputTokens, - outputTokens: entry.outputTokens, - totalTokens: entry.totalTokens, - }, - preview: entry.preview || entry.assistantPreview || `Turn ${entry.turn}`, - }; - }); -} export function formatCompactCount(value) { const n = Number(value || 0); if (!Number.isFinite(n)) return "0"; @@ -384,7 +288,7 @@ export function buildSessionInsights(fullSession = null) { }); } - const usage = normalizeTokenUsageMeta(msg?.meta) || normalizeUsage(msg?.usage) || null; + const usage = normalizeUsage(msg?.meta?.usage) || normalizeUsage(msg?.usage) || null; if (usage) { tokenUsage.inputTokens += usage.input; tokenUsage.outputTokens += usage.output; @@ -454,7 +358,6 @@ export function buildSessionInsights(fullSession = null) { }; } - const turnTimeline = buildTurnTimeline(messages); const derived = { totals: { messages: messages.length, @@ -486,8 +389,6 @@ export function buildSessionInsights(fullSession = null) { contextWindow, contextBreakdown, tokenUsage, - turnTimeline, - turns: turnTimeline, activityDiff: { files: edited.map((entry) => ({ path: entry.path, @@ -516,15 +417,7 @@ export function buildSessionInsights(fullSession = null) { ? persisted.contextBreakdown : derived.contextBreakdown, tokenUsage: persisted.tokenUsage || derived.tokenUsage, - turnTimeline: Array.isArray(persisted.turnTimeline) - ? persisted.turnTimeline - : (Array.isArray(persisted.turns) ? persisted.turns : derived.turnTimeline), - turns: Array.isArray(persisted.turns) - ? persisted.turns - : (Array.isArray(persisted.turnTimeline) ? 
persisted.turnTimeline : derived.turns), activityDiff: persisted.activityDiff || derived.activityDiff, generatedAt: persisted.generatedAt || derived.generatedAt, }; } - - diff --git a/lib/vault-keychain.mjs b/lib/vault-keychain.mjs deleted file mode 100644 index 80fb4b292..000000000 --- a/lib/vault-keychain.mjs +++ /dev/null @@ -1,259 +0,0 @@ -/** - * Vault Keychain Adapter — OS-native secret storage for the vault master key. - * - * Storage precedence: - * 1. BOSUN_VAULT_KEY env var (hex string, 32 bytes) — CI / container override - * 2. Windows Credential Manager (via PowerShell) - * 3. macOS Keychain (via `security` CLI) - * 4. Linux Secret Service (via `secret-tool` CLI) - * - * All operations are synchronous (execSync) to keep callers simple. - */ - -import { execFileSync } from "node:child_process"; -import { randomBytes } from "node:crypto"; - -const SERVICE_NAME = "bosun-vault"; -const ACCOUNT_NAME = "master-key"; - -// ─── Platform detection ──────────────────────────────────────────────────────── - -function platform() { - return process.platform; // "win32" | "darwin" | "linux" -} - -// ─── Windows Credential Manager ─────────────────────────────────────────────── - -function winRead() { - try { - const ps = ` -[Net.ServicePointManager]::SecurityProtocol = 'Tls12' -$cred = Get-StoredCredential -Target '${SERVICE_NAME}' -ErrorAction SilentlyContinue -if ($cred) { $cred.GetNetworkCredential().Password } else { '' } -`.trim(); - // Try CredentialManager module first (may not be installed) - const result = execFileSync( - "powershell", - ["-NoProfile", "-NonInteractive", "-Command", ps], - { encoding: "utf8", timeout: 10_000 } - ).trim(); - return result || null; - } catch { - return winReadFallback(); - } -} - -function winReadFallback() { - try { - // Use Windows Credential Manager via DPAPI directly - const ps = ` -Add-Type -AssemblyName System.Security -$cm = [System.Security.Cryptography.ProtectedData] -try { - $target = 
'${SERVICE_NAME}/${ACCOUNT_NAME}' - $cred = [System.Net.CredentialCache]::DefaultNetworkCredentials - # Fall back: read from generic credential store - $sig = @' -[DllImport("advapi32.dll", EntryPoint="CredReadW", CharSet=CharSet.Unicode, SetLastError=true)] -public static extern bool CredRead(string target, uint type, int reservedFlag, out IntPtr credentialPtr); -[DllImport("advapi32.dll")] -public static extern void CredFree([In] IntPtr cred); -'@ - $WinCred = Add-Type -MemberDefinition $sig -Namespace "WinCred" -Name "NativeMethods" -PassThru - $credPtr = [IntPtr]::Zero - if ($WinCred::CredRead($target, 1, 0, [ref]$credPtr)) { - $cred = [System.Runtime.InteropServices.Marshal]::PtrToStructure($credPtr, [type][System.Net.NetworkCredential]) - Write-Output $cred.Password - $WinCred::CredFree($credPtr) - } -} catch { } -`.trim(); - const result = execFileSync( - "powershell", - ["-NoProfile", "-NonInteractive", "-Command", ps], - { encoding: "utf8", timeout: 10_000 } - ).trim(); - return result || null; - } catch { - return null; - } -} - -function winWrite(hexKey) { - try { - const ps = ` -$target = '${SERVICE_NAME}/${ACCOUNT_NAME}' -$pass = ConvertTo-SecureString $env:BOSUN_VAULT_HEXKEY -AsPlainText -Force -$cred = New-Object System.Management.Automation.PSCredential ($target, $pass) -Add-Type -AssemblyName System.Security -$sig = @' -[DllImport("advapi32.dll", EntryPoint="CredWriteW", CharSet=CharSet.Unicode, SetLastError=true)] -public static extern bool CredWrite([In] ref CREDENTIAL userCredential, [In] uint flags); -[StructLayout(LayoutKind.Sequential, CharSet=CharSet.Unicode)] -public struct CREDENTIAL { - public uint Flags; public uint Type; public string TargetName; - public string Comment; public System.Runtime.InteropServices.ComTypes.FILETIME LastWritten; - public uint CredentialBlobSize; public IntPtr CredentialBlob; public uint Persist; - public uint AttributeCount; public IntPtr Attributes; public string TargetAlias; public string UserName; -} -'@ 
-$WinCred = Add-Type -MemberDefinition $sig -Namespace "WinCred2" -Name "NativeMethods" -PassThru -$blob = [System.Text.Encoding]::Unicode.GetBytes($env:BOSUN_VAULT_HEXKEY) -$blobPtr = [System.Runtime.InteropServices.Marshal]::AllocHGlobal($blob.Length) -[System.Runtime.InteropServices.Marshal]::Copy($blob, 0, $blobPtr, $blob.Length) -$credential = New-Object WinCred2.NativeMethods+CREDENTIAL -$credential.TargetName = $target; $credential.UserName = 'bosun'; $credential.Type = 1 -$credential.Persist = 2; $credential.CredentialBlob = $blobPtr; $credential.CredentialBlobSize = $blob.Length -$WinCred::CredWrite([ref]$credential, 0) | Out-Null -[System.Runtime.InteropServices.Marshal]::FreeHGlobal($blobPtr) -`.trim(); - execFileSync( - "powershell", - ["-NoProfile", "-NonInteractive", "-Command", ps], - { - encoding: "utf8", - timeout: 10_000, - stdio: ["ignore", "ignore", "ignore"], - env: { ...process.env, BOSUN_VAULT_HEXKEY: hexKey }, - } - ); - return true; - } catch { - return false; - } -} - -// ─── macOS Keychain ──────────────────────────────────────────────────────────── - -function macRead() { - try { - const result = execFileSync( - "security", - ["find-generic-password", "-s", SERVICE_NAME, "-a", ACCOUNT_NAME, "-w"], - { encoding: "utf8", timeout: 10_000, stdio: ["ignore", "pipe", "ignore"] } - ).trim(); - return result || null; - } catch { - return null; - } -} - -function macWrite(hexKey) { - try { - // Delete existing first to avoid duplicate errors - try { - execFileSync( - "security", - ["delete-generic-password", "-s", SERVICE_NAME, "-a", ACCOUNT_NAME], - { timeout: 5_000, stdio: "ignore" } - ); - } catch { /* not found — ok */ } - execFileSync( - "security", - ["add-generic-password", "-s", SERVICE_NAME, "-a", ACCOUNT_NAME, "-w", hexKey], - { encoding: "utf8", timeout: 10_000, stdio: "ignore" } - ); - return true; - } catch { - return false; - } -} - -// ─── Linux Secret Service (secret-tool) ─────────────────────────────────────── - -function 
linuxRead() { - try { - const result = execFileSync( - "secret-tool", - ["lookup", "service", SERVICE_NAME, "account", ACCOUNT_NAME], - { encoding: "utf8", timeout: 10_000, stdio: ["ignore", "pipe", "ignore"] } - ).trim(); - return result || null; - } catch { - return null; - } -} - -function linuxWrite(hexKey) { - try { - execFileSync( - "secret-tool", - ["store", "--label=Bosun Vault Master Key", "service", SERVICE_NAME, "account", ACCOUNT_NAME], - { - input: hexKey, - encoding: "utf8", - timeout: 10_000, - stdio: ["pipe", "ignore", "ignore"], - } - ); - return true; - } catch { - return false; - } -} - -// ─── Public API ──────────────────────────────────────────────────────────────── - -/** - * Read the vault master key from the OS keychain (or env var). - * Returns a 32-byte Buffer or null if not found. - */ -export function keychainRead() { - // Highest-priority override: env var - if (process.env.BOSUN_VAULT_KEY) { - const buf = Buffer.from(process.env.BOSUN_VAULT_KEY, "hex"); - if (buf.length === 32) return buf; - } - - let hex = null; - if (platform() === "win32") hex = winRead(); - else if (platform() === "darwin") hex = macRead(); - else hex = linuxRead(); - - if (!hex || hex.length !== 64) return null; - return Buffer.from(hex, "hex"); -} - -/** - * Write the vault master key to the OS keychain. - * @param {Buffer} key — 32-byte key - * @returns {boolean} true on success - */ -export function keychainWrite(key) { - if (!Buffer.isBuffer(key) || key.length !== 32) { - throw new Error("Key must be a 32-byte Buffer"); - } - const hex = key.toString("hex"); - if (platform() === "win32") return winWrite(hex); - if (platform() === "darwin") return macWrite(hex); - return linuxWrite(hex); -} - -/** - * Generate a new random 32-byte master key and store it in the OS keychain. 
- * @returns {Buffer} the new key - */ -export function keychainGenerateAndStore() { - const key = randomBytes(32); - const ok = keychainWrite(key); - if (!ok) { - throw new Error( - "Failed to store vault key in OS keychain. " + - "Set BOSUN_VAULT_KEY env var (64-char hex) as a fallback." - ); - } - return key; -} - -/** - * Attempt to read the key; generate and store a new one if not found. - * Useful for first-run setup. - * @returns {{ key: Buffer, created: boolean }} - */ -export function keychainGetOrCreate() { - const existing = keychainRead(); - if (existing) return { key: existing, created: false }; - const key = keychainGenerateAndStore(); - return { key, created: true }; -} - diff --git a/lib/vault.mjs b/lib/vault.mjs deleted file mode 100644 index e1e36dbb4..000000000 --- a/lib/vault.mjs +++ /dev/null @@ -1,374 +0,0 @@ -/** - * Bosun Vault — AES-256-GCM encrypted credential store. - * - * Storage: ~/.bosun-vault/vault.enc - * Envelope: { v: 1, iv: "", tag: "", data: "" } - * Plaintext: { secrets: {...}, env: {...}, mcpRefs: {...} } - * - * The master key is managed by lib/vault-keychain.mjs. 
- */ - -import { createCipheriv, createDecipheriv, randomBytes } from "node:crypto"; -import { - existsSync, - mkdirSync, - readFileSync, - writeFileSync, -} from "node:fs"; -import { homedir } from "node:os"; -import { join } from "node:path"; -import { randomUUID } from "node:crypto"; - -const VAULT_VERSION = 1; -const VAULT_DIR = join(homedir(), ".bosun-vault"); -const VAULT_PATH = join(VAULT_DIR, "vault.enc"); -const KEY_BYTES = 32; // 256-bit -const IV_BYTES = 12; // 96-bit GCM nonce -const TAG_BYTES = 16; - -// ─── Encryption helpers ──────────────────────────────────────────────────────── - -function encrypt(key, plaintext) { - const iv = randomBytes(IV_BYTES); - const cipher = createCipheriv("aes-256-gcm", key, iv); - const enc = Buffer.concat([cipher.update(plaintext, "utf8"), cipher.final()]); - const tag = cipher.getAuthTag(); - return { - iv: iv.toString("hex"), - tag: tag.toString("hex"), - data: enc.toString("hex"), - }; -} - -function decrypt(key, iv, tag, data) { - const decipher = createDecipheriv( - "aes-256-gcm", - key, - Buffer.from(iv, "hex") - ); - decipher.setAuthTag(Buffer.from(tag, "hex")); - const dec = Buffer.concat([ - decipher.update(Buffer.from(data, "hex")), - decipher.final(), - ]); - return dec.toString("utf8"); -} - -// ─── Persistence helpers ─────────────────────────────────────────────────────── - -function readEnvelope() { - if (!existsSync(VAULT_PATH)) return null; - return JSON.parse(readFileSync(VAULT_PATH, "utf8")); -} - -function writeEnvelope(envelope) { - if (!existsSync(VAULT_DIR)) mkdirSync(VAULT_DIR, { recursive: true }); - writeFileSync(VAULT_PATH, JSON.stringify(envelope), "utf8"); -} - -// ─── Empty vault payload ─────────────────────────────────────────────────────── - -function emptyPayload() { - return { secrets: {}, env: {}, mcpRefs: {} }; -} - -// ─── VaultStore class ────────────────────────────────────────────────────────── - -export class VaultStore { - constructor() { - this._key = null; // Buffer(32) when 
unlocked - this._data = null; // decrypted payload object - } - - // ── Lifecycle ──────────────────────────────────────────────────────────────── - - /** Returns true when the vault file exists. */ - isInitialized() { - return existsSync(VAULT_PATH); - } - - /** Returns true when the vault is unlocked (key loaded in memory). */ - isUnlocked() { - return this._key !== null && this._data !== null; - } - - /** - * Initialize a brand-new vault with the given 32-byte key. - * Throws if vault already exists. - */ - init(key) { - if (this.isInitialized()) { - throw new Error("Vault already initialized. Use open() to unlock it."); - } - this._key = this._validateKey(key); - this._data = emptyPayload(); - this._flush(); - return this; - } - - /** - * Open (unlock) an existing vault with the given key. - * Throws if vault does not exist or key is wrong. - */ - open(key) { - const envelope = readEnvelope(); - if (!envelope) { - throw new Error("Vault not initialized. Call init() first."); - } - if (envelope.v !== VAULT_VERSION) { - throw new Error(`Unsupported vault version: ${envelope.v}`); - } - this._key = this._validateKey(key); - const plaintext = decrypt(this._key, envelope.iv, envelope.tag, envelope.data); - this._data = JSON.parse(plaintext); - return this; - } - - /** Lock the vault — clears the key and decrypted data from memory. */ - seal() { - if (this._key) this._key.fill(0); - this._key = null; - this._data = null; - } - - // ── Secrets ────────────────────────────────────────────────────────────────── - - /** - * List all secrets (without field values). 
- * @returns {{ id, name, integration, label, permissions, createdAt, updatedAt }[]} - */ - listSecrets() { - this._requireUnlocked(); - return Object.values(this._data.secrets).map((s) => ({ - id: s.id, - name: s.name, - integration: s.integration, - label: s.label, - permissions: s.permissions, - createdAt: s.createdAt, - updatedAt: s.updatedAt, - })); - } - - /** - * Get a secret including its decrypted field values. - */ - getSecret(id) { - this._requireUnlocked(); - const s = this._data.secrets[id]; - if (!s) throw new Error(`Secret not found: ${id}`); - return { ...s }; - } - - /** - * Create a new secret. - * @param {{ name, integration, label, fields, permissions }} opts - * @returns {string} new secret id - */ - createSecret({ name, integration, label, fields, permissions } = {}) { - this._requireUnlocked(); - const id = randomUUID(); - const now = new Date().toISOString(); - this._data.secrets[id] = { - id, - name: name ?? "Unnamed", - integration: integration ?? "custom", - label: label ?? "", - fields: fields ?? {}, - permissions: permissions ?? { agents: ["*"], workflows: ["*"], deny: [] }, - createdAt: now, - updatedAt: now, - }; - this._flush(); - return id; - } - - /** - * Update an existing secret's name/label/fields. - */ - updateSecret(id, { name, label, fields, permissions } = {}) { - this._requireUnlocked(); - const s = this._data.secrets[id]; - if (!s) throw new Error(`Secret not found: ${id}`); - if (name !== undefined) s.name = name; - if (label !== undefined) s.label = label; - if (fields !== undefined) s.fields = { ...s.fields, ...fields }; - if (permissions !== undefined) s.permissions = permissions; - s.updatedAt = new Date().toISOString(); - this._flush(); - } - - /** - * Delete a secret by id. 
- */ - deleteSecret(id) { - this._requireUnlocked(); - if (!this._data.secrets[id]) throw new Error(`Secret not found: ${id}`); - delete this._data.secrets[id]; - // Remove any mcpRefs pointing to this secret - for (const [k, v] of Object.entries(this._data.mcpRefs)) { - if (v === id) delete this._data.mcpRefs[k]; - } - this._flush(); - } - - /** - * Update RBAC permissions for a secret. - * @param {string} id - * @param {{ agents?: string[], workflows?: string[], deny?: string[] }} permissions - */ - setPermissions(id, permissions) { - this._requireUnlocked(); - const s = this._data.secrets[id]; - if (!s) throw new Error(`Secret not found: ${id}`); - s.permissions = { agents: ["*"], workflows: ["*"], deny: [], ...permissions }; - s.updatedAt = new Date().toISOString(); - this._flush(); - } - - // ── Env vars ───────────────────────────────────────────────────────────────── - - /** - * List all env var keys stored in the vault (no values). - */ - listEnvKeys() { - this._requireUnlocked(); - return Object.keys(this._data.env); - } - - /** - * Get an env var value. - */ - getEnv(key) { - this._requireUnlocked(); - return this._data.env[key]; - } - - /** - * Set (create or update) an env var. - */ - setEnv(key, value) { - this._requireUnlocked(); - this._data.env[key] = value; - this._flush(); - } - - /** - * Delete an env var. - */ - deleteEnv(key) { - this._requireUnlocked(); - delete this._data.env[key]; - this._flush(); - } - - // ── RBAC helpers ───────────────────────────────────────────────────────────── - - /** - * Check whether an agent is allowed to access a secret. - * @param {string} secretId - * @param {string} agentId - */ - canAgentAccess(secretId, agentId) { - const s = this._data?.secrets?.[secretId]; - if (!s) return false; - const { agents = ["*"], deny = [] } = s.permissions ?? 
{}; - if (deny.includes(agentId) || deny.includes("*")) return false; - return agents.includes("*") || agents.includes(agentId); - } - - /** - * Check whether a workflow is allowed to access a secret. - */ - canWorkflowAccess(secretId, workflowId) { - const s = this._data?.secrets?.[secretId]; - if (!s) return false; - const { workflows = ["*"], deny = [] } = s.permissions ?? {}; - if (deny.includes(workflowId) || deny.includes("*")) return false; - return workflows.includes("*") || workflows.includes(workflowId); - } - - // ── Process env injection ───────────────────────────────────────────────────── - - /** - * Merge vault env vars into a plain object suitable for process.env injection. - * Only includes keys not already present in the provided base env. - * @param {Record} [baseEnv] — defaults to {} - * @returns {Record} - */ - resolveEnv(baseEnv = {}) { - if (!this.isUnlocked()) return {}; - const out = {}; - for (const [k, v] of Object.entries(this._data.env)) { - if (!(k in baseEnv)) out[k] = v; - } - return out; - } - - /** - * Resolve env vars that the given agentId is allowed to access. - * Right now env vars are not RBAC-scoped (unlike secrets), but - * this method is exposed for future scoping. - */ - resolveEnvForAgent(_agentId, baseEnv = {}) { - return this.resolveEnv(baseEnv); - } - - // ── MCP refs ────────────────────────────────────────────────────────────────── - - /** - * Link an MCP env key to a secret id. - * e.g. "github.GITHUB_PERSONAL_ACCESS_TOKEN" → "" - */ - setMcpRef(mcpKey, secretId) { - this._requireUnlocked(); - this._data.mcpRefs[mcpKey] = secretId; - this._flush(); - } - - getMcpRef(mcpKey) { - this._requireUnlocked(); - return this._data.mcpRefs[mcpKey] ?? null; - } - - // ── Status ──────────────────────────────────────────────────────────────────── - - status() { - return { - initialized: this.isInitialized(), - unlocked: this.isUnlocked(), - secretCount: this.isUnlocked() ? 
Object.keys(this._data.secrets).length : null, - envCount: this.isUnlocked() ? Object.keys(this._data.env).length : null, - }; - } - - // ── Internal ────────────────────────────────────────────────────────────────── - - _requireUnlocked() { - if (!this.isUnlocked()) throw new Error("Vault is locked. Call open(key) first."); - } - - _validateKey(key) { - if (Buffer.isBuffer(key) && key.length === KEY_BYTES) return key; - if (typeof key === "string") { - const buf = Buffer.from(key, "hex"); - if (buf.length === KEY_BYTES) return buf; - } - throw new Error(`Vault key must be a 32-byte Buffer or 64-char hex string (got ${typeof key}).`); - } - - _flush() { - const plaintext = JSON.stringify(this._data); - const { iv, tag, data } = encrypt(this._key, plaintext); - writeEnvelope({ v: VAULT_VERSION, iv, tag, data }); - } -} - -// Shared singleton for use in server-side code -let _defaultVault = null; -export function getDefaultVault() { - if (!_defaultVault) _defaultVault = new VaultStore(); - return _defaultVault; -} - -export { VAULT_PATH, VAULT_DIR, KEY_BYTES }; diff --git a/package-lock.json b/package-lock.json index 4e92bd1fb..f4206912d 100644 --- a/package-lock.json +++ b/package-lock.json @@ -1,19 +1,18 @@ { "name": "bosun", - "version": "0.42.6", + "version": "0.42.5", "lockfileVersion": 3, "requires": true, "packages": { "": { "name": "bosun", - "version": "0.42.6", + "version": "0.42.5", "hasInstallScript": true, "license": "Apache-2.0", "dependencies": { "@anthropic-ai/claude-agent-sdk": "latest", "@github/copilot-sdk": "latest", "@google/genai": "^1.44.0", - "@jridgewell/sourcemap-codec": "^1.5.5", "@modelcontextprotocol/sdk": "^1.26.0", "@openai/agents": "^0.5.2", "@openai/codex-sdk": "latest", @@ -57,7 +56,6 @@ "telegram-sentinel": "telegram/telegram-sentinel.mjs" }, "devDependencies": { - "@alcalzone/ansi-tokenize": "^0.1.3", "@emotion/react": "^11.14.0", "@emotion/styled": "^11.14.1", "@mui/material": "^5.18.0", @@ -1589,6 +1587,7 @@ "version": "1.5.5", 
"resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.5.5.tgz", "integrity": "sha512-cYQ9310grqxueWbl+WuIUIaiUaDcj7WOq5fVhEljNVgRfOUhY9fy2zTvfoqWsnebh8Sl70VScFbICvJnLKB0Og==", + "dev": true, "license": "MIT" }, "node_modules/@jridgewell/trace-mapping": { @@ -3593,9 +3592,9 @@ } }, "node_modules/cosmiconfig/node_modules/yaml": { - "version": "1.10.3", - "resolved": "https://registry.npmjs.org/yaml/-/yaml-1.10.3.tgz", - "integrity": "sha512-vIYeF1u3CjlhAFekPPAk2h/Kv4T3mAkMox5OymRiJQB0spDP10LHvt+K7G9Ny6NuuMAb25/6n1qyUjAcGNf/AA==", + "version": "1.10.2", + "resolved": "https://registry.npmjs.org/yaml/-/yaml-1.10.2.tgz", + "integrity": "sha512-r3vXyErRCYJ7wg28yvBY5VSoAF8ZvlcW9/BwUzEtUsjvX/DKs24dIkuwjtuprwJJHsbyUbLApepYTR1BN4uHrg==", "dev": true, "license": "ISC", "engines": { @@ -5167,9 +5166,9 @@ "license": "MIT" }, "node_modules/path-to-regexp": { - "version": "8.4.0", - "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-8.4.0.tgz", - "integrity": "sha512-PuseHIvAnz3bjrM2rGJtSgo1zjgxapTLZ7x2pjhzWwlp4SJQgK3f3iZIQwkpEnBaKz6seKBADpM4B4ySkuYypg==", + "version": "8.3.0", + "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-8.3.0.tgz", + "integrity": "sha512-7jdwVIRtsP8MYpdXSwOS0YdD0Du+qOoF/AEPIt88PcCFrZCzx41oxku1jD88hZBwbNUIEfpqvuhjFaMAqMTWnA==", "license": "MIT", "funding": { "type": "opencollective", diff --git a/package.json b/package.json index cd7c16c0c..98b140c7d 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "bosun", - "version": "0.42.6", + "version": "0.42.5", "description": "Bosun Autonomous Engineering — manages AI agent executors with failover, extremely powerful workflow builder, and a massive amount of included default workflow templates for autonomous engineering, creates PRs via GitHub/Jira APIs, and sends Telegram notifications. 
Supports N executors with weighted distribution, multi-repo projects, and auto-setup.", "type": "module", "license": "Apache-2.0", @@ -149,8 +149,7 @@ "mutate": "npx stryker run", "mutate:incremental": "npx stryker run --incremental", "mutate:report": "node scripts/mutation-report.mjs", - "mutate:scoped": "npx stryker run --mutate", - "lint": "npm run syntax:check && npm run prompt:lint" + "mutate:scoped": "npx stryker run --mutate" }, "files": [ ".env.example", @@ -181,10 +180,9 @@ "agent/primary-agent.mjs", "agent/retry-queue.mjs", "agent/review-agent.mjs", - "agent/skills/", + "agent/skills/skill-codebase-audit.md", "bench/benchmark-mode.mjs", "bench/benchmark-registry.mjs", - "bench/eval-framework.mjs", "bench/swebench/bosun-swebench.mjs", "bosun-tui.mjs", "bosun.config.example.json", @@ -225,9 +223,7 @@ "infra/desktop-shortcut.mjs", "infra/error-detector.mjs", "infra/fetch-runtime.mjs", - "infra/guardrails.mjs", "infra/health-status.mjs", - "infra/heartbeat-monitor.mjs", "infra/library-manager-utils.mjs", "infra/library-manager-well-known-sources.mjs", "infra/library-manager.mjs", @@ -250,12 +246,8 @@ "kanban/kanban-adapter.mjs", "lib/codebase-audit-manifests.mjs", "lib/codebase-audit.mjs", - "lib/integrations-registry.mjs", "lib/logger.mjs", - "lib/mojibake-repair.mjs", "lib/session-insights.mjs", - "lib/vault-keychain.mjs", - "lib/vault.mjs", "merge-strategy.mjs", "monitor-tail-sanitizer.mjs", "postinstall.mjs", @@ -369,38 +361,36 @@ "@anthropic-ai/claude-agent-sdk": "latest", "@github/copilot-sdk": "latest", "@google/genai": "^1.44.0", - "@jridgewell/sourcemap-codec": "^1.5.5", "@modelcontextprotocol/sdk": "^1.26.0", "@openai/agents": "^0.5.2", "@openai/codex-sdk": "latest", "@opencode-ai/sdk": "latest", - "@opentelemetry/api": "^1.9.0", - "@opentelemetry/exporter-trace-otlp-http": "^0.206.0", - "@opentelemetry/resources": "^2.1.0", - "@opentelemetry/sdk-metrics": "^2.1.0", - "@opentelemetry/sdk-trace-base": "^2.1.0", - 
"@opentelemetry/semantic-conventions": "^1.37.0", "@preact/signals": "1.3.1", "@whiskeysockets/baileys": "^7.0.0-rc.9", "ajv": "^8.18.0", "es-module-shims": "^2.8.0", "express": "^5.1.0", "express-rate-limit": "^8.0.0", - "figures": "^6.1.0", "hono": "^4.12.7", "htm": "3.1.1", "ink": "^5.0.0", "ink-text-input": "^6.0.0", "preact": "10.25.4", "qrcode-terminal": "^0.12.0", + "ws": "^8.19.0", + "@opentelemetry/api": "^1.9.0", + "@opentelemetry/exporter-trace-otlp-http": "^0.206.0", + "@opentelemetry/resources": "^2.1.0", + "@opentelemetry/sdk-metrics": "^2.1.0", + "@opentelemetry/sdk-trace-base": "^2.1.0", + "@opentelemetry/semantic-conventions": "^1.37.0", + "figures": "^6.1.0", "react": "^18.3.1", - "react-dom": "^18.3.1", - "ws": "^8.19.0" + "react-dom": "^18.3.1" }, "devDependencies": { "@emotion/react": "^11.14.0", "@emotion/styled": "^11.14.1", - "@alcalzone/ansi-tokenize": "^0.1.3", "@mui/material": "^5.18.0", "@playwright/test": "^1.58.2", "playwright": "^1.58.2", diff --git a/playwright.config.mjs b/playwright.config.mjs index c814b991b..84a4d290f 100644 --- a/playwright.config.mjs +++ b/playwright.config.mjs @@ -1,9 +1,4 @@ -import * as playwrightTest from "@playwright/test"; - -const defineConfig = - playwrightTest.defineConfig ?? - playwrightTest.default?.defineConfig ?? - ((config) => config); +import { defineConfig } from "@playwright/test"; export default defineConfig({ testDir: "server", diff --git a/postinstall.mjs b/postinstall.mjs index 3d2dfb713..604ec9c53 100644 --- a/postinstall.mjs +++ b/postinstall.mjs @@ -36,10 +36,6 @@ const BUNDLED_PWSH_PATH = resolve(BUNDLED_PWSH_DIR, "pwsh"); const FALLBACK_PWSH_VERSION = "7.4.6"; const require = createRequire(import.meta.url); -function resolveNpmCommand() { - return process.platform === "win32" ? 
"npm.cmd" : "npm"; -} - // ── Helpers ────────────────────────────────────────────────────────────────── function commandExists(cmd) { @@ -377,7 +373,7 @@ async function main() { console.log(""); console.log(" ▸ Installing desktop dependencies (Electron)..."); try { - execSync(`${resolveNpmCommand()} install`, { + execSync("npm install", { cwd: desktopDir, stdio: "inherit", timeout: 0, @@ -448,7 +444,3 @@ async function main() { main().catch((err) => { console.error(` :alert: postinstall failed: ${err.message}`); }); - - - - diff --git a/prs-open.json b/prs-open.json deleted file mode 100644 index 78b4e35bf..000000000 --- a/prs-open.json +++ /dev/null @@ -1 +0,0 @@ -[{"headRefName":"guardrails","isDraft":false,"number":457,"state":"OPEN","title":"feat(Guardrails): add guardrails feature to Bosun","url":"https://github.com/virtengine/bosun/pull/457"},{"headRefName":"repair/pr437","isDraft":false,"number":456,"state":"OPEN","title":"Repair/pr437","url":"https://github.com/virtengine/bosun/pull/456"},{"headRefName":"task/a5787873546b-m-reliability-auto-update-failure-circuit-breake","isDraft":false,"number":455,"state":"OPEN","title":"[m] reliability: auto-update failure circuit breaker","url":"https://github.com/virtengine/bosun/pull/455"},{"headRefName":"task/ac13245d3271-feat-agents-copy-to-clipboard-session-id-pill-bu","isDraft":false,"number":454,"state":"OPEN","title":"feat(agents): copy-to-clipboard session ID pill button in agents table","url":"https://github.com/virtengine/bosun/pull/454"},{"headRefName":"task/eb0893c6b815-feat-tui-help-screen-and-keyboard-shortcut-refer","isDraft":false,"number":453,"state":"OPEN","title":"feat(tui): Help screen and keyboard shortcut reference overlay","url":"https://github.com/virtengine/bosun/pull/453"},{"headRefName":"pr-443-fix","isDraft":false,"number":452,"state":"OPEN","title":"fix/pr 443 ci/cd 
failures","url":"https://github.com/virtengine/bosun/pull/452"},{"headRefName":"task/bc219832c3a1-feat-eval-agent-evaluation-benchmarking-framewor","isDraft":false,"number":451,"state":"OPEN","title":"feat(eval): Agent Evaluation & Benchmarking Framework ÔÇö data-driven agent optimization","url":"https://github.com/virtengine/bosun/pull/451"},{"headRefName":"task/493ae5e42027-feat-css-tabular-numerals-on-all-numeric-portal-","isDraft":false,"number":450,"state":"OPEN","title":"feat(css): tabular numerals on all numeric portal outputs","url":"https://github.com/virtengine/bosun/pull/450"},{"headRefName":"task/b3364eceda27-feat-portal-live-websocket-connection-badge-in-p","isDraft":false,"number":449,"state":"OPEN","title":"feat(portal): live WebSocket connection badge in portal header","url":"https://github.com/virtengine/bosun/pull/449"},{"headRefName":"task/e69d47108981-feat-agents-input-output-token-split-display-per","isDraft":false,"number":448,"state":"OPEN","title":"feat(agents): input/output token split display per active session row","url":"https://github.com/virtengine/bosun/pull/448"},{"headRefName":"task/f8180f1d37de-feat-agents-live-session-turn-counter-alongside-","isDraft":false,"number":446,"state":"OPEN","title":"feat(agents): live session turn counter alongside runtime timer","url":"https://github.com/virtengine/bosun/pull/446"},{"headRefName":"task/cfd631a87666-feat-tui-session-detail-modal-full-session-drill","isDraft":false,"number":445,"state":"OPEN","title":"feat(tui): Session Detail modal ÔÇö full session drill-down with turn timeline and diff","url":"https://github.com/virtengine/bosun/pull/445"},{"headRefName":"task/56b361530b9c-feat-agent-add-mcp-tool-overhead-visibility-and-","isDraft":false,"number":443,"state":"OPEN","title":"feat(agent): add MCP tool overhead visibility and 
logging","url":"https://github.com/virtengine/bosun/pull/443"},{"headRefName":"task/3cc669aec7e8-feat-tui-telemetry-screen-ascii-sparklines-provi","isDraft":false,"number":441,"state":"OPEN","title":"feat(tui): Telemetry screen ÔÇö ASCII sparklines, provider stats, rate-limit heatmap, cost tracker","url":"https://github.com/virtengine/bosun/pull/441"},{"headRefName":"task/4567dec9f7e3-feat-tui-workflows-screen-list-trigger-inspect-a","isDraft":false,"number":440,"state":"OPEN","title":"feat(tui): Workflows screen list, trigger, inspect, and cancel workflow runs from terminal","url":"https://github.com/virtengine/bosun/pull/440"},{"headRefName":"task/3ad658101698-feat-tui-settings-screen-read-edit-bosun-config-","isDraft":false,"number":439,"state":"OPEN","title":"feat(tui): Settings screen read/edit bosun.config.json fields inline with validation","url":"https://github.com/virtengine/bosun/pull/439"},{"headRefName":"task/2316d3215b65-feat-tui-command-palette-fuzzy-find-any-action-w","isDraft":false,"number":438,"state":"OPEN","title":"feat(tui): Command palette fuzzy-find any action with Ctrl+P or : prefix","url":"https://github.com/virtengine/bosun/pull/438"},{"headRefName":"task/76cc7f25b5f2-m-feat-workflow-add-delegation-watchdog-to-auto-","isDraft":false,"number":437,"state":"OPEN","title":"[m] feat(workflow): add delegation watchdog to auto-recover stalled non-task nodes","url":"https://github.com/virtengine/bosun/pull/437"},{"headRefName":"task/3142f12b27d3-m-feat-workflow-add-delegation-audit-trail-and-r","isDraft":false,"number":436,"state":"OPEN","title":"[m] feat(workflow): add delegation audit trail and replay-safe state transitions","url":"https://github.com/virtengine/bosun/pull/436"},{"headRefName":"task/cc3da1b76df6-m-feat-task-enforce-task-batch-payload-schema-va","isDraft":false,"number":435,"state":"OPEN","title":"[m] feat(task): enforce task-batch payload schema validation at runtime 
boundaries","url":"https://github.com/virtengine/bosun/pull/435"}] diff --git a/prs.json b/prs.json deleted file mode 100644 index bade33fa2..000000000 --- a/prs.json +++ /dev/null @@ -1 +0,0 @@ -[{"baseRefName":"main","headRefName":"dependabot/npm_and_yarn/npm_and_yarn-7fa4943c34","isDraft":false,"mergedAt":"2026-03-28T21:56:53Z","number":464,"state":"MERGED","title":"chore(deps): bump path-to-regexp from 8.3.0 to 8.4.0 in the npm_and_yarn group across 1 directory","url":"https://github.com/virtengine/bosun/pull/464"},{"baseRefName":"main","headRefName":"repair/pr457-cwd-fallback","isDraft":false,"mergedAt":null,"number":463,"state":"CLOSED","title":"Repair/pr457 cwd fallback","url":"https://github.com/virtengine/bosun/pull/463"},{"baseRefName":"main","headRefName":"continue/add-continue-env-config","isDraft":false,"mergedAt":"2026-03-28T17:09:10Z","number":462,"state":"MERGED","title":"chore: add Continue environment configuration","url":"https://github.com/virtengine/bosun/pull/462"},{"baseRefName":"main","headRefName":"codex/site-demo-sync","isDraft":false,"mergedAt":"2026-03-27T19:34:04Z","number":461,"state":"MERGED","title":"Codex/site demo sync","url":"https://github.com/virtengine/bosun/pull/461"},{"baseRefName":"main","headRefName":"codex/site-demo-sync","isDraft":false,"mergedAt":null,"number":460,"state":"CLOSED","title":"Codex/site demo sync","url":"https://github.com/virtengine/bosun/pull/460"},{"baseRefName":"main","headRefName":"task/93cd6f915fef-s-preflight-detect-interactive-git-editor-and-gu","isDraft":false,"mergedAt":"2026-03-27T06:41:14Z","number":459,"state":"MERGED","title":"[s] preflight: detect interactive git editor and guide remediation","url":"https://github.com/virtengine/bosun/pull/459"},{"baseRefName":"main","headRefName":"codex/site-demo-sync","isDraft":false,"mergedAt":"2026-03-26T16:24:35Z","number":458,"state":"MERGED","title":"fix(site): demo site UI CSS 
resolution","url":"https://github.com/virtengine/bosun/pull/458"},{"baseRefName":"main","headRefName":"guardrails","isDraft":false,"mergedAt":null,"number":457,"state":"OPEN","title":"feat(Guardrails): add guardrails feature to Bosun","url":"https://github.com/virtengine/bosun/pull/457"},{"baseRefName":"main","headRefName":"repair/pr437","isDraft":false,"mergedAt":null,"number":456,"state":"OPEN","title":"Repair/pr437","url":"https://github.com/virtengine/bosun/pull/456"},{"baseRefName":"main","headRefName":"task/a5787873546b-m-reliability-auto-update-failure-circuit-breake","isDraft":false,"mergedAt":null,"number":455,"state":"OPEN","title":"[m] reliability: auto-update failure circuit breaker","url":"https://github.com/virtengine/bosun/pull/455"},{"baseRefName":"main","headRefName":"task/ac13245d3271-feat-agents-copy-to-clipboard-session-id-pill-bu","isDraft":false,"mergedAt":null,"number":454,"state":"OPEN","title":"feat(agents): copy-to-clipboard session ID pill button in agents table","url":"https://github.com/virtengine/bosun/pull/454"},{"baseRefName":"main","headRefName":"task/eb0893c6b815-feat-tui-help-screen-and-keyboard-shortcut-refer","isDraft":false,"mergedAt":null,"number":453,"state":"OPEN","title":"feat(tui): Help screen and keyboard shortcut reference overlay","url":"https://github.com/virtengine/bosun/pull/453"},{"baseRefName":"main","headRefName":"pr-443-fix","isDraft":false,"mergedAt":null,"number":452,"state":"OPEN","title":"fix/pr 443 ci/cd failures","url":"https://github.com/virtengine/bosun/pull/452"},{"baseRefName":"main","headRefName":"task/bc219832c3a1-feat-eval-agent-evaluation-benchmarking-framewor","isDraft":false,"mergedAt":null,"number":451,"state":"OPEN","title":"feat(eval): Agent Evaluation & Benchmarking Framework ÔÇö data-driven agent 
optimization","url":"https://github.com/virtengine/bosun/pull/451"},{"baseRefName":"main","headRefName":"task/493ae5e42027-feat-css-tabular-numerals-on-all-numeric-portal-","isDraft":false,"mergedAt":null,"number":450,"state":"OPEN","title":"feat(css): tabular numerals on all numeric portal outputs","url":"https://github.com/virtengine/bosun/pull/450"},{"baseRefName":"main","headRefName":"task/b3364eceda27-feat-portal-live-websocket-connection-badge-in-p","isDraft":false,"mergedAt":null,"number":449,"state":"OPEN","title":"feat(portal): live WebSocket connection badge in portal header","url":"https://github.com/virtengine/bosun/pull/449"},{"baseRefName":"main","headRefName":"task/e69d47108981-feat-agents-input-output-token-split-display-per","isDraft":false,"mergedAt":null,"number":448,"state":"OPEN","title":"feat(agents): input/output token split display per active session row","url":"https://github.com/virtengine/bosun/pull/448"},{"baseRefName":"main","headRefName":"task/24acc4022620-feat-skills-enforce-lazy-conditional-skill-injec","isDraft":false,"mergedAt":"2026-03-26T14:50:34Z","number":447,"state":"MERGED","title":"feat(skills): enforce lazy/conditional skill injection with char budget cap","url":"https://github.com/virtengine/bosun/pull/447"},{"baseRefName":"main","headRefName":"task/f8180f1d37de-feat-agents-live-session-turn-counter-alongside-","isDraft":false,"mergedAt":null,"number":446,"state":"OPEN","title":"feat(agents): live session turn counter alongside runtime timer","url":"https://github.com/virtengine/bosun/pull/446"},{"baseRefName":"main","headRefName":"task/cfd631a87666-feat-tui-session-detail-modal-full-session-drill","isDraft":false,"mergedAt":null,"number":445,"state":"OPEN","title":"feat(tui): Session Detail modal ÔÇö full session drill-down with turn timeline and 
diff","url":"https://github.com/virtengine/bosun/pull/445"},{"baseRefName":"main","headRefName":"task/3f0091bff149-feat-tui-logs-screen-streaming-multi-source-log-","isDraft":false,"mergedAt":"2026-03-26T19:18:18Z","number":444,"state":"MERGED","title":"feat(tui): Logs screen ÔÇö streaming multi-source log viewer with session filter and search","url":"https://github.com/virtengine/bosun/pull/444"},{"baseRefName":"main","headRefName":"task/56b361530b9c-feat-agent-add-mcp-tool-overhead-visibility-and-","isDraft":false,"mergedAt":null,"number":443,"state":"OPEN","title":"feat(agent): add MCP tool overhead visibility and logging","url":"https://github.com/virtengine/bosun/pull/443"},{"baseRefName":"main","headRefName":"dependabot/npm_and_yarn/npm_and_yarn-d633397f06","isDraft":false,"mergedAt":"2026-03-26T11:51:38Z","number":442,"state":"MERGED","title":"chore(deps-dev): bump the npm_and_yarn group across 2 directories with 1 update","url":"https://github.com/virtengine/bosun/pull/442"},{"baseRefName":"main","headRefName":"task/3cc669aec7e8-feat-tui-telemetry-screen-ascii-sparklines-provi","isDraft":false,"mergedAt":null,"number":441,"state":"OPEN","title":"feat(tui): Telemetry screen ÔÇö ASCII sparklines, provider stats, rate-limit heatmap, cost tracker","url":"https://github.com/virtengine/bosun/pull/441"},{"baseRefName":"main","headRefName":"task/4567dec9f7e3-feat-tui-workflows-screen-list-trigger-inspect-a","isDraft":false,"mergedAt":null,"number":440,"state":"OPEN","title":"feat(tui): Workflows screen list, trigger, inspect, and cancel workflow runs from terminal","url":"https://github.com/virtengine/bosun/pull/440"},{"baseRefName":"main","headRefName":"task/3ad658101698-feat-tui-settings-screen-read-edit-bosun-config-","isDraft":false,"mergedAt":null,"number":439,"state":"OPEN","title":"feat(tui): Settings screen read/edit bosun.config.json fields inline with 
validation","url":"https://github.com/virtengine/bosun/pull/439"},{"baseRefName":"main","headRefName":"task/2316d3215b65-feat-tui-command-palette-fuzzy-find-any-action-w","isDraft":false,"mergedAt":null,"number":438,"state":"OPEN","title":"feat(tui): Command palette fuzzy-find any action with Ctrl+P or : prefix","url":"https://github.com/virtengine/bosun/pull/438"},{"baseRefName":"main","headRefName":"task/76cc7f25b5f2-m-feat-workflow-add-delegation-watchdog-to-auto-","isDraft":false,"mergedAt":null,"number":437,"state":"OPEN","title":"[m] feat(workflow): add delegation watchdog to auto-recover stalled non-task nodes","url":"https://github.com/virtengine/bosun/pull/437"},{"baseRefName":"main","headRefName":"task/3142f12b27d3-m-feat-workflow-add-delegation-audit-trail-and-r","isDraft":false,"mergedAt":null,"number":436,"state":"OPEN","title":"[m] feat(workflow): add delegation audit trail and replay-safe state transitions","url":"https://github.com/virtengine/bosun/pull/436"},{"baseRefName":"main","headRefName":"task/cc3da1b76df6-m-feat-task-enforce-task-batch-payload-schema-va","isDraft":false,"mergedAt":null,"number":435,"state":"OPEN","title":"[m] feat(task): enforce task-batch payload schema validation at runtime boundaries","url":"https://github.com/virtengine/bosun/pull/435"},{"baseRefName":"main","headRefName":"task/0e58fe1ea0fd-s-feat-task-harden-taskstats-contract-and-fixtur","isDraft":false,"mergedAt":"2026-03-26T10:09:32Z","number":434,"state":"MERGED","title":"[s] feat(task): harden taskStats contract and fixture realism in task CLI tests","url":"https://github.com/virtengine/bosun/pull/434"},{"baseRefName":"main","headRefName":"merge/feat-enhance-command-diagnostics","isDraft":false,"mergedAt":"2026-03-25T04:39:08Z","number":433,"state":"MERGED","title":"merge: 
feat/enhance-command-diagnostics","url":"https://github.com/virtengine/bosun/pull/433"},{"baseRefName":"main","headRefName":"repair-pr-431","isDraft":false,"mergedAt":"2026-03-25T04:35:01Z","number":432,"state":"MERGED","title":"Repair pr 431","url":"https://github.com/virtengine/bosun/pull/432"},{"baseRefName":"main","headRefName":"task/7c489b80e31b-s-feat-task-surface-repo-area-lock-contention-te","isDraft":false,"mergedAt":"2026-03-25T03:04:05Z","number":431,"state":"MERGED","title":"[s] feat(task): surface repo-area lock contention telemetry in operator views","url":"https://github.com/virtengine/bosun/pull/431"},{"baseRefName":"main","headRefName":"release/v0.42.5","isDraft":false,"mergedAt":"2026-03-24T16:58:41Z","number":430,"state":"MERGED","title":"chore(release): v0.42.5 — skill externalization, provider context fix","url":"https://github.com/virtengine/bosun/pull/430"},{"baseRefName":"main","headRefName":"bosun/fix-pr425","isDraft":false,"mergedAt":"2026-03-24T15:34:04Z","number":429,"state":"MERGED","title":"Fix workflow template node command args","url":"https://github.com/virtengine/bosun/pull/429"},{"baseRefName":"main","headRefName":"task/0003889d6381-feat-tui-tasks-screen-full-kanban-crud-in-termin","isDraft":false,"mergedAt":null,"number":428,"state":"CLOSED","title":"feat(tui): Tasks screen ÔÇö full kanban CRUD in terminal with column view and filters","url":"https://github.com/virtengine/bosun/pull/428"},{"baseRefName":"main","headRefName":"task/0d255b09eec1-feat-tui-ws-bridge-expose-monitor-stats-and-sess","isDraft":false,"mergedAt":"2026-03-24T13:33:42Z","number":427,"state":"MERGED","title":"feat(tui): WS bridge ÔÇö expose monitor stats and session events over the local bosun WS bus for TUI 
consumption","url":"https://github.com/virtengine/bosun/pull/427"},{"baseRefName":"main","headRefName":"task/0d255b09eec1-feat-tui-ws-bridge-expose-monitor-stats-and-sess-retry2","isDraft":false,"mergedAt":"2026-03-24T15:38:30Z","number":426,"state":"MERGED","title":"feat(tui): expose canonical ws session snapshots for TUI bridge","url":"https://github.com/virtengine/bosun/pull/426"},{"baseRefName":"main","headRefName":"task/9c7d7ce37efe-m-feat-workflow-add-canvas-support-for-explicit-","isDraft":false,"mergedAt":"2026-03-24T06:37:58Z","number":425,"state":"MERGED","title":"[m] feat(workflow): add canvas support for explicit edge port mapping","url":"https://github.com/virtengine/bosun/pull/425"},{"baseRefName":"main","headRefName":"task/45dde7fb42db-feat-tui-create-task-from-terminal-form","isDraft":false,"mergedAt":"2026-03-24T14:58:14Z","number":424,"state":"MERGED","title":"feat(tui): Create task from terminal form","url":"https://github.com/virtengine/bosun/pull/424"},{"baseRefName":"main","headRefName":"task/3e64f3186954-m-feat-infra-expose-poisoned-worktree-auto-repai","isDraft":false,"mergedAt":"2026-03-24T07:23:41Z","number":423,"state":"MERGED","title":"[m] feat(infra): expose poisoned worktree auto-repair events and recovery status","url":"https://github.com/virtengine/bosun/pull/423"},{"baseRefName":"main","headRefName":"task/8b2f4eb852ae-m-feat-voice-add-turn-trace-replay-and-mismatch-","isDraft":false,"mergedAt":"2026-03-24T05:06:15Z","number":422,"state":"MERGED","title":"[m] feat(voice): add turn-trace replay and mismatch diagnostics for voice actions","url":"https://github.com/virtengine/bosun/pull/422"},{"baseRefName":"main","headRefName":"task/2962c1fe050e-m-feat-workflow-add-custom-node-plugin-health-ch","isDraft":false,"mergedAt":"2026-03-24T04:50:26Z","number":421,"state":"MERGED","title":"[m] feat(workflow): add custom node plugin health checks and scaffold 
validation","url":"https://github.com/virtengine/bosun/pull/421"},{"baseRefName":"main","headRefName":"task/c0606b76d46a-feat-tui-tui-test-suite-unit-and-integration-tes","isDraft":false,"mergedAt":"2026-03-24T05:48:37Z","number":420,"state":"MERGED","title":"feat(tui): TUI test suite unit and integration tests for all screens and the WS bridge","url":"https://github.com/virtengine/bosun/pull/420"},{"baseRefName":"main","headRefName":"task/adad2e42e32d-feat-workflow-canvas-node-groups-sub-workflow-ex","isDraft":false,"mergedAt":"2026-03-24T15:43:56Z","number":419,"state":"MERGED","title":"feat(workflow-canvas): node groups, sub-workflow extraction, and workflow import/export","url":"https://github.com/virtengine/bosun/pull/419"},{"baseRefName":"main","headRefName":"task/7343fecc3731-chore-prompts-audit-and-remove-verbose-narration","isDraft":false,"mergedAt":"2026-03-24T14:55:34Z","number":418,"state":"MERGED","title":"chore(prompts): audit and remove verbose narration patterns from agent prompt templates","url":"https://github.com/virtengine/bosun/pull/418"},{"baseRefName":"main","headRefName":"task/2cd5daa54fc9-m-feat-ui-harden-session-recovery-across-all-min","isDraft":false,"mergedAt":"2026-03-23T23:15:46Z","number":417,"state":"MERGED","title":"[m] feat(ui): harden session recovery across all Mini App data surfaces","url":"https://github.com/virtengine/bosun/pull/417"},{"baseRefName":"main","headRefName":"task/0003889d6381-feat-tui-tasks-screen-full-kanban-crud-in-termin","isDraft":false,"mergedAt":"2026-03-23T20:33:46Z","number":416,"state":"MERGED","title":"feat(tui): Tasks screen ÔÇö full kanban CRUD in terminal with column view and filters","url":"https://github.com/virtengine/bosun/pull/416"},{"baseRefName":"main","headRefName":"task/349f9f0d324d-feat-tui-tui-architecture-scaffold-bosun-tui-mjs","isDraft":false,"mergedAt":"2026-03-23T20:54:12Z","number":415,"state":"MERGED","title":"feat(tui): TUI architecture ÔÇö scaffold bosun-tui.mjs with ink, entry 
point, and screen router","url":"https://github.com/virtengine/bosun/pull/415"},{"baseRefName":"main","headRefName":"task/ba795c04e248-feat-tui-status-header-panel-system-wide-metrics","isDraft":false,"mergedAt":"2026-03-23T21:20:45Z","number":414,"state":"MERGED","title":"feat(tui): Status Header panel ÔÇö system-wide metrics bar pinned at top of every screen","url":"https://github.com/virtengine/bosun/pull/414"},{"baseRefName":"main","headRefName":"task/62e528af296f-m-feat-workflow-classify-and-surface-validation-","isDraft":false,"mergedAt":"2026-03-23T20:10:49Z","number":413,"state":"MERGED","title":"[m] feat(workflow): classify and surface validation-runner failure modes","url":"https://github.com/virtengine/bosun/pull/413"},{"baseRefName":"main","headRefName":"task/T1-test-task","isDraft":false,"mergedAt":null,"number":412,"state":"CLOSED","title":"Test task","url":"https://github.com/virtengine/bosun/pull/412"},{"baseRefName":"main","headRefName":"task/d9765e1b4f84-l-feat-tracing-propagate-workflow-and-task-span-","isDraft":false,"mergedAt":"2026-03-24T13:47:02Z","number":411,"state":"MERGED","title":"[l] feat(tracing): propagate workflow and task span context end-to-end","url":"https://github.com/virtengine/bosun/pull/411"},{"baseRefName":"main","headRefName":"task/70ca0aa8fa25-m-feat-task-extend-path-normalization-to-attachm","isDraft":false,"mergedAt":"2026-03-23T18:22:04Z","number":410,"state":"MERGED","title":"[m] feat(task): extend path normalization to attachments and archive workflows","url":"https://github.com/virtengine/bosun/pull/410"},{"baseRefName":"main","headRefName":"task/bb067c3685ad-l-feat-workflow-inject-repo-map-context-into-pla","isDraft":false,"mergedAt":"2026-03-23T22:59:14Z","number":409,"state":"MERGED","title":"feat(workflow): inject repo-map context into planner and execution 
nodes","url":"https://github.com/virtengine/bosun/pull/409"},{"baseRefName":"main","headRefName":"task/431107973873-agentfield-style-durable-execution-ledger-for-wo","isDraft":false,"mergedAt":"2026-03-24T13:08:12Z","number":408,"state":"MERGED","title":"AgentField-style durable execution ledger for workflow and autonomous runs","url":"https://github.com/virtengine/bosun/pull/408"},{"baseRefName":"main","headRefName":"task/bbe2f02ec1b5-m-feat-task-harden-task-store-path-normalization","isDraft":false,"mergedAt":null,"number":407,"state":"CLOSED","title":"[m] feat(task): harden task-store path normalization and collision safeguards","url":"https://github.com/virtengine/bosun/pull/407"},{"baseRefName":"main","headRefName":"task/0bc4b6cdcf09-feat-tracing-opentelemetry-tracing-industry-stan","isDraft":false,"mergedAt":"2026-03-23T12:43:03Z","number":406,"state":"MERGED","title":"task/0bc4b6cdcf09 feat tracing opentelemetry tracing industry stan","url":"https://github.com/virtengine/bosun/pull/406"},{"baseRefName":"main","headRefName":"task/67e0e1c84306-m-feat-ui-unify-session-list-resilience-paths-fo","isDraft":false,"mergedAt":"2026-03-23T12:45:15Z","number":405,"state":"MERGED","title":"[m] feat(ui): unify session-list resilience paths for load-error, 404 fallback, and retry","url":"https://github.com/virtengine/bosun/pull/405"},{"baseRefName":"main","headRefName":"task/e7b6f2bbc629-l-feat-kanban-propagate-pr-linkage-integrity-fro","isDraft":false,"mergedAt":"2026-03-23T02:02:38Z","number":404,"state":"MERGED","title":"feat(kanban): propagate PR linkage integrity from status updates through board refresh","url":"https://github.com/virtengine/bosun/pull/404"},{"baseRefName":"main","headRefName":"monitor/bosun-env-stability","isDraft":false,"mergedAt":null,"number":403,"state":"CLOSED","title":"fix: prefer explicit repo-root task 
store","url":"https://github.com/virtengine/bosun/pull/403"},{"baseRefName":"main","headRefName":"task/bbe2f02ec1b5-m-feat-task-harden-task-store-path-normalization","isDraft":false,"mergedAt":null,"number":402,"state":"CLOSED","title":"feat(task): harden task-store path normalization and collision safeguards","url":"https://github.com/virtengine/bosun/pull/402"},{"baseRefName":"main","headRefName":"monitor/bosun-env-stability","isDraft":false,"mergedAt":null,"number":401,"state":"CLOSED","title":"fix: pin task batch to source backlog store","url":"https://github.com/virtengine/bosun/pull/401"},{"baseRefName":"main","headRefName":"task/c04d09d454fa-isolated-heavy-runner-pool-for-build-test-and-va","isDraft":false,"mergedAt":"2026-03-23T10:12:48Z","number":400,"state":"MERGED","title":"Add isolated heavy runner pool for validation workflows","url":"https://github.com/virtengine/bosun/pull/400"},{"baseRefName":"main","headRefName":"task/b6e576b28f97-m-feat-testing-build-cross-module-regression-sui","isDraft":false,"mergedAt":"2026-03-23T01:46:29Z","number":399,"state":"MERGED","title":"feat(testing): build cross-module regression suite for monitor-workflow-task handoff invariants","url":"https://github.com/virtengine/bosun/pull/399"},{"baseRefName":"main","headRefName":"monitor/bosun-env-stability","isDraft":false,"mergedAt":null,"number":398,"state":"CLOSED","title":"fix: prefer explicit repo-root task store","url":"https://github.com/virtengine/bosun/pull/398"},{"baseRefName":"main","headRefName":"task/c8067759906a-sift-style-semantic-command-router-and-context-b","isDraft":false,"mergedAt":"2026-03-23T15:20:48Z","number":397,"state":"MERGED","title":"Sift-style semantic command router and context budget policies","url":"https://github.com/virtengine/bosun/pull/397"},{"baseRefName":"main","headRefName":"monitor/bosun-env-stability","isDraft":false,"mergedAt":null,"number":396,"state":"CLOSED","title":"fix: honor explicit repo-root task 
store","url":"https://github.com/virtengine/bosun/pull/396"},{"baseRefName":"main","headRefName":"task/ab1c4446fbe8-swe-af-style-dagstate-replanning-and-issue-advis","isDraft":false,"mergedAt":"2026-03-23T01:56:19Z","number":395,"state":"MERGED","title":"feat: SWE-AF DAGState replanning and issue-advisor loop","url":"https://github.com/virtengine/bosun/pull/395"},{"baseRefName":"main","headRefName":"task/a3056106c65c-repo-map-and-architect-editor-execution-mode","isDraft":false,"mergedAt":"2026-03-22T21:18:46Z","number":394,"state":"MERGED","title":"Repo map and architect-editor execution mode","url":"https://github.com/virtengine/bosun/pull/394"},{"baseRefName":"main","headRefName":"task/bbe2f02ec1b5-m-feat-task-harden-task-store-path-normalization","isDraft":false,"mergedAt":"2026-03-22T20:55:47Z","number":393,"state":"MERGED","title":"feat(task): harden task-store path normalization","url":"https://github.com/virtengine/bosun/pull/393"},{"baseRefName":"main","headRefName":"task/6da44dbcd6f3-m-feat-agent-enforce-executor-repo-area-lock-fai","isDraft":false,"mergedAt":"2026-03-22T20:32:18Z","number":392,"state":"MERGED","title":"feat(agent): enforce executor repo-area lock fairness","url":"https://github.com/virtengine/bosun/pull/392"},{"baseRefName":"main","headRefName":"task/15266997314a-m-feat-infra-add-monitor-self-healing-backoff-po","isDraft":false,"mergedAt":"2026-03-22T20:28:35Z","number":391,"state":"MERGED","title":"feat(infra): add bounded monitor recovery policy","url":"https://github.com/virtengine/bosun/pull/391"},{"baseRefName":"main","headRefName":"monitor/bosun-env-stability","isDraft":false,"mergedAt":"2026-03-22T20:21:58Z","number":390,"state":"MERGED","title":"fix: dispatch task batch lifecycle fan-out","url":"https://github.com/virtengine/bosun/pull/390"},{"baseRefName":"repair/pr-377-fallback","headRefName":"copilot/sub-pr-388","isDraft":true,"mergedAt":null,"number":389,"state":"CLOSED","title":"Fix all failing tests and CI workflow 
checks","url":"https://github.com/virtengine/bosun/pull/389"},{"baseRefName":"main","headRefName":"repair/pr-377-fallback","isDraft":false,"mergedAt":null,"number":388,"state":"CLOSED","title":"Repair/pr 377 fallback","url":"https://github.com/virtengine/bosun/pull/388"},{"baseRefName":"main","headRefName":"repair/pr-377-fallback","isDraft":false,"mergedAt":null,"number":387,"state":"CLOSED","title":"Repair/pr 377 fallback","url":"https://github.com/virtengine/bosun/pull/387"},{"baseRefName":"main","headRefName":"task/bbe2f02ec1b5-m-feat-task-harden-task-store-path-normalization","isDraft":false,"mergedAt":null,"number":386,"state":"CLOSED","title":"[m] feat(task): harden task-store path normalization and collision safeguards","url":"https://github.com/virtengine/bosun/pull/386"},{"baseRefName":"main","headRefName":"task/6da44dbcd6f3-m-feat-agent-enforce-executor-repo-area-lock-fai","isDraft":false,"mergedAt":null,"number":385,"state":"CLOSED","title":"[m] feat(agent): enforce executor repo-area lock fairness and starvation telemetry","url":"https://github.com/virtengine/bosun/pull/385"},{"baseRefName":"main","headRefName":"task/15266997314a-m-feat-infra-add-monitor-self-healing-backoff-po","isDraft":false,"mergedAt":null,"number":384,"state":"CLOSED","title":"[m] feat(infra): add monitor self-healing backoff policy for stale dispatch recovery","url":"https://github.com/virtengine/bosun/pull/384"},{"baseRefName":"main","headRefName":"repair/pr-377-fallback","isDraft":false,"mergedAt":"2026-03-22T16:38:06Z","number":383,"state":"MERGED","title":"Repair/pr 377 fallback","url":"https://github.com/virtengine/bosun/pull/383"},{"baseRefName":"main","headRefName":"task/0d255b09eec1-feat-tui-ws-bridge-expose-monitor-stats-and-sess","isDraft":false,"mergedAt":null,"number":382,"state":"CLOSED","title":"Task/0d255b09eec1 feat tui ws bridge expose monitor stats and 
sess","url":"https://github.com/virtengine/bosun/pull/382"},{"baseRefName":"main","headRefName":"task/a1731dc43e95-trajectory-replay-and-lakeview-style-run-summari","isDraft":false,"mergedAt":"2026-03-22T20:36:42Z","number":381,"state":"MERGED","title":"Add replayable trajectories and run summaries","url":"https://github.com/virtengine/bosun/pull/381"},{"baseRefName":"task/a3056106c65c-repo-map-and-architect-editor-execution-mode","headRefName":"copilot/sub-pr-377","isDraft":false,"mergedAt":"2026-03-22T15:40:06Z","number":380,"state":"MERGED","title":"fix(agent): eliminate inferExecutionRole duplication, add allowlist validation, sanitize changedFiles paths, add prompt-framing tests","url":"https://github.com/virtengine/bosun/pull/380"},{"baseRefName":"recovery/staged-work-20260323","headRefName":"copilot/sub-pr-378","isDraft":false,"mergedAt":"2026-03-22T15:39:58Z","number":379,"state":"MERGED","title":"fix(ci): align workspace mirror paths, API limits, and stale claim filtering","url":"https://github.com/virtengine/bosun/pull/379"},{"baseRefName":"main","headRefName":"recovery/staged-work-20260323","isDraft":false,"mergedAt":null,"number":378,"state":"CLOSED","title":"chore(recovery): preserve 98 files of staged work from task branches","url":"https://github.com/virtengine/bosun/pull/378"},{"baseRefName":"main","headRefName":"task/a3056106c65c-repo-map-and-architect-editor-execution-mode","isDraft":false,"mergedAt":null,"number":377,"state":"CLOSED","title":"Repo map and architect-editor execution mode","url":"https://github.com/virtengine/bosun/pull/377"},{"baseRefName":"main","headRefName":"task/e8b43644d7cc-untitled-task","isDraft":false,"mergedAt":"2026-03-22T14:26:47Z","number":376,"state":"MERGED","title":"Fix task recovery and OAuth env normalization","url":"https://github.com/virtengine/bosun/pull/376"},{"baseRefName":"main","headRefName":"live-pr-349","isDraft":false,"mergedAt":"2026-03-22T13:15:18Z","number":375,"state":"MERGED","title":"Live pr 
349","url":"https://github.com/virtengine/bosun/pull/375"},{"baseRefName":"main","headRefName":"live-pr-355","isDraft":false,"mergedAt":"2026-03-22T12:52:23Z","number":374,"state":"MERGED","title":"Live pr 355","url":"https://github.com/virtengine/bosun/pull/374"},{"baseRefName":"main","headRefName":"fix/diff-review-merge-sweep","isDraft":false,"mergedAt":"2026-03-22T11:33:30Z","number":373,"state":"MERGED","title":"feat: improve task diff review UX","url":"https://github.com/virtengine/bosun/pull/373"},{"baseRefName":"main","headRefName":"task/9651aef2d46e-scoped-persistent-memory-for-teams-workspaces-se","isDraft":false,"mergedAt":"2026-03-22T10:24:25Z","number":372,"state":"MERGED","title":"Add scoped persistent memory support","url":"https://github.com/virtengine/bosun/pull/372"},{"baseRefName":"main","headRefName":"monitor/bosun-env-stability","isDraft":false,"mergedAt":null,"number":371,"state":"CLOSED","title":"fix: restore task batch fan-out dispatch","url":"https://github.com/virtengine/bosun/pull/371"},{"baseRefName":"main","headRefName":"monitor/bosun-env-stability","isDraft":false,"mergedAt":null,"number":370,"state":"CLOSED","title":"fix: restore task batch coordinator gate","url":"https://github.com/virtengine/bosun/pull/370"},{"baseRefName":"main","headRefName":"task/ba93577e35ec-untitled-task","isDraft":false,"mergedAt":null,"number":369,"state":"CLOSED","title":"fix(ui): normalize Windows stream timeline paths","url":"https://github.com/virtengine/bosun/pull/369"},{"baseRefName":"main","headRefName":"fix/watchdog-update-behind-branches","isDraft":false,"mergedAt":null,"number":368,"state":"CLOSED","title":"fix(watchdog): auto-update behind branches before merge attempt","url":"https://github.com/virtengine/bosun/pull/368"},{"baseRefName":"main","headRefName":"task/f3c301092b60-untitled-task","isDraft":false,"mergedAt":null,"number":367,"state":"CLOSED","title":"Untitled 
task","url":"https://github.com/virtengine/bosun/pull/367"},{"baseRefName":"main","headRefName":"fix/f3c30109-stream-timeline-file-change","isDraft":false,"mergedAt":"2026-03-22T03:08:55Z","number":366,"state":"MERGED","title":"fix(ui): keep file change timeline blocks separate","url":"https://github.com/virtengine/bosun/pull/366"},{"baseRefName":"main","headRefName":"task/f3c301092b60-untitled-task","isDraft":false,"mergedAt":null,"number":365,"state":"CLOSED","title":"fix(ui): keep file change timeline blocks separate","url":"https://github.com/virtengine/bosun/pull/365"}] diff --git a/server/bosun-mcp-server.mjs b/server/bosun-mcp-server.mjs index 3be7688cf..62860c1f0 100644 --- a/server/bosun-mcp-server.mjs +++ b/server/bosun-mcp-server.mjs @@ -1,34 +1,15 @@ #!/usr/bin/env node -import { - existsSync, - mkdirSync, - readdirSync, - readFileSync, - statSync, - writeFileSync, -} from "node:fs"; -import { dirname, join, resolve } from "node:path"; import { fileURLToPath } from "node:url"; +import { resolve } from "node:path"; import { format } from "node:util"; -import * as mcpServer from "@modelcontextprotocol/sdk/server/index.js"; -import * as mcpStdio from "@modelcontextprotocol/sdk/server/stdio.js"; -import * as mcpTypes from "@modelcontextprotocol/sdk/types.js"; -import { repairCommonMojibake } from "../lib/mojibake-repair.mjs"; - -const Server = mcpServer.Server ?? mcpServer.default?.Server; -const StdioServerTransport = - mcpStdio.StdioServerTransport ?? - mcpStdio.default?.StdioServerTransport; -const CallToolRequestSchema = - mcpTypes.CallToolRequestSchema ?? - mcpTypes.default?.CallToolRequestSchema; -const ListToolsRequestSchema = - mcpTypes.ListToolsRequestSchema ?? 
- mcpTypes.default?.ListToolsRequestSchema; +import { Server } from "@modelcontextprotocol/sdk/server/index.js"; +import { StdioServerTransport } from "@modelcontextprotocol/sdk/server/stdio.js"; +import { + CallToolRequestSchema, + ListToolsRequestSchema, +} from "@modelcontextprotocol/sdk/types.js"; const TAG = "[bosun-mcp]"; -const ResolvedCallToolRequestSchema = CallToolRequestSchema; -const ResolvedListToolsRequestSchema = ListToolsRequestSchema; const DEFAULT_DISCOVERY_PORTS = [3080, 4400]; const DEFAULT_REQUEST_TIMEOUT_MS = 10_000; const ENV_KEYS_FOR_EMBEDDED = [ @@ -626,125 +607,9 @@ export function listBosunMcpTools() { required: ["toolName"], }, }, - // ── File tools ───────────────────────────────────────────────────────── - { - name: "str_replace_editor", - description: - "Make a surgical edit to a file by replacing an exact string. " + - "Safer than full rewrites — preserves encoding, only changes what's specified. " + - "Prefer this over shell-based patching. " + - "The old_str must match exactly (whitespace included). " + - "Call read_file first if you need to verify the exact text.", - inputSchema: { - type: "object", - properties: { - path: { type: "string", description: "Absolute or workspace-relative file path." }, - old_str: { type: "string", description: "Exact string to find and replace (must be unique in the file)." }, - new_str: { type: "string", description: "Replacement string." }, - workspace_path: { type: "string", description: "Workspace root for resolving relative paths." }, - }, - required: ["path", "old_str", "new_str"], - }, - }, - { - name: "write_file", - description: - "Write content to a file, creating it and any missing parent directories if needed. " + - "Use for new files or when a complete rewrite is necessary. " + - "For editing existing files prefer str_replace_editor.", - inputSchema: { - type: "object", - properties: { - path: { type: "string", description: "Absolute or workspace-relative file path." 
}, - content: { type: "string", description: "Full file content to write." }, - workspace_path: { type: "string", description: "Workspace root for resolving relative paths." }, - }, - required: ["path", "content"], - }, - }, - { - name: "read_file", - description: - "Read a file's content with line numbers. Use start_line and end_line to focus on a range. " + - "Always call this before str_replace_editor to confirm the exact text you want to replace.", - inputSchema: { - type: "object", - properties: { - path: { type: "string", description: "Absolute or workspace-relative file path." }, - start_line: { type: "number", description: "First line to read (1-indexed, inclusive)." }, - end_line: { type: "number", description: "Last line to read (1-indexed, inclusive)." }, - workspace_path: { type: "string", description: "Workspace root for resolving relative paths." }, - }, - required: ["path"], - }, - }, - { - name: "grep_search", - description: - "Search for text or regex patterns across files. Returns matching lines with file paths and line numbers. " + - "Use this to locate code before editing rather than guessing paths.", - inputSchema: { - type: "object", - properties: { - pattern: { type: "string", description: "Text or regex pattern to search for." }, - path: { type: "string", description: "Directory or file to search in (defaults to workspace root)." }, - glob: { type: "string", description: "Glob pattern to filter files, e.g. '**/*.mjs' or '*.ts'." }, - case_insensitive: { type: "boolean", description: "Case-insensitive search. Default: false." }, - max_results: { type: "number", description: "Maximum number of matching lines to return. Default: 50." }, - workspace_path: { type: "string", description: "Workspace root for resolving relative paths." }, - }, - required: ["pattern"], - }, - }, ]; } -// ── File tool helpers ────────────────────────────────────────────────────── - -function resolveFilePath(filePath, workspacePath) { - const p = String(filePath ?? 
"").trim(); - if (!p) throw new Error("path is required"); - if (p.match(/^([A-Za-z]:\\|\/)/)) return p; // already absolute - const base = workspacePath ? resolve(String(workspacePath).trim()) : process.cwd(); - return join(base, p); -} - -function countOccurrences(haystack, needle) { - let count = 0; - let pos = 0; - while ((pos = haystack.indexOf(needle, pos)) !== -1) { count++; pos += needle.length; } - return count; -} - -const GREP_SKIP_DIRS = new Set(["node_modules", ".git", ".bosun", "dist", "build", ".next", "coverage"]); - -function matchGlob(filePath, pattern) { - if (!pattern || pattern === "**/*") return true; - // Simple glob: supports *.ext and **/*.ext patterns only - let ext = ""; - if (pattern.startsWith("*.")) { - ext = pattern.slice(2); - } else if (pattern.startsWith("**/*.")) { - ext = pattern.slice(5); - } - if (ext && !pattern.includes("/")) return filePath.endsWith(`.${ext}`); - return true; -} - -function walkForGrep(dir, glob, callback) { - try { - const entries = readdirSync(dir, { withFileTypes: true }); - for (const entry of entries) { - const full = join(dir, entry.name); - if (entry.isDirectory()) { - if (!GREP_SKIP_DIRS.has(entry.name)) walkForGrep(full, glob, callback); - } else if (entry.isFile() && matchGlob(entry.name, glob)) { - if (callback(full) === false) return; - } - } - } catch { /* skip inaccessible dirs */ } -} - const BOSUN_TOOL_HANDLERS = { async bosun_status(runtime) { const backend = await runtime.ensureBackend(); @@ -1055,112 +920,6 @@ const BOSUN_TOOL_HANDLERS = { }); return response.data; }, - - // ── File system tools ────────────────────────────────────────────────────── - // These are implemented as local fs operations (not Bosun HTTP API calls) - // so they work reliably with correct encoding regardless of shell/platform. 
- - str_replace_editor(_runtime, args) { - const absPath = resolveFilePath(args.path, args.workspace_path); - if (!existsSync(absPath)) throw new Error(`File not found: ${absPath}`); - const original = readFileSync(absPath, "utf8"); - const oldStr = String(args.old_str ?? ""); - const newStr = repairCommonMojibake(String(args.new_str ?? "")); - if (!oldStr) throw new Error("old_str must not be empty"); - const idx = original.indexOf(oldStr); - if (idx === -1) { - throw new Error( - `str_replace_editor: old_str not found in ${absPath}.\n` + - `Verify the exact text including whitespace. ` + - `Tip: read_file the target first, then copy the exact text to old_str.`, - ); - } - const occurrences = countOccurrences(original, oldStr); - if (occurrences > 1) { - throw new Error( - `str_replace_editor: old_str matched ${occurrences} times in ${absPath}. ` + - `Add more surrounding context to make it unique.`, - ); - } - const updated = original.slice(0, idx) + newStr + original.slice(idx + oldStr.length); - writeFileSync(absPath, updated, "utf8"); - const lineNum = original.slice(0, idx).split("\n").length; - return { - success: true, - path: absPath, - replaced_at_line: lineNum, - occurrences_checked: occurrences, - repairedMojibake: newStr !== String(args.new_str ?? ""), - }; - }, - - write_file(_runtime, args) { - const absPath = resolveFilePath(args.path, args.workspace_path); - const rawContent = String(args.content ?? 
""); - const content = repairCommonMojibake(rawContent); - const dir = dirname(absPath); - if (!existsSync(dir)) mkdirSync(dir, { recursive: true }); - writeFileSync(absPath, content, "utf8"); - const lineCount = content.split("\n").length; - return { - success: true, - path: absPath, - bytes_written: Buffer.byteLength(content, "utf8"), - lines: lineCount, - repairedMojibake: content !== rawContent, - }; - }, - - read_file(_runtime, args) { - const absPath = resolveFilePath(args.path, args.workspace_path); - if (!existsSync(absPath)) throw new Error(`File not found: ${absPath}`); - const raw = readFileSync(absPath, "utf8"); - const lines = raw.split("\n"); - const startLine = args.start_line ? Math.max(1, Number(args.start_line)) : 1; - const endLine = args.end_line ? Math.min(lines.length, Number(args.end_line)) : lines.length; - const sliced = lines.slice(startLine - 1, endLine); - const content = sliced - .map((line, i) => `${String(startLine + i).padStart(6)} | ${line}`) - .join("\n"); - return { path: absPath, start_line: startLine, end_line: startLine + sliced.length - 1, total_lines: lines.length, content }; - }, - - grep_search(_runtime, args) { - const pattern = String(args.pattern ?? ""); - if (!pattern) throw new Error("pattern is required"); - const base = args.workspace_path - ? resolve(args.workspace_path) - : process.cwd(); - const searchRoot = args.path - ? resolveFilePath(args.path, args.workspace_path) - : base; - const glob = String(args.glob ?? "**/*"); - const caseInsensitive = Boolean(args.case_insensitive); - const maxResults = Math.min(Number(args.max_results ?? 50), 500); - const flags = caseInsensitive ? 
"gi" : "g"; - let regex; - try { - regex = new RegExp(pattern, flags); - } catch { - regex = new RegExp(pattern.replace(/[.*+?^${}()|[\]\\]/g, "\\$&"), flags); - } - const matches = []; - walkForGrep(searchRoot, glob, (filePath) => { - if (matches.length >= maxResults) return false; - try { - const text = readFileSync(filePath, "utf8"); - const fileLines = text.split("\n"); - for (let i = 0; i < fileLines.length; i++) { - if (matches.length >= maxResults) break; - if (regex.test(fileLines[i])) { - matches.push({ file: filePath, line: i + 1, text: fileLines[i].trim() }); - } - } - } catch { /* skip unreadable files */ } - return true; - }); - return { pattern, results: matches, total: matches.length, truncated: matches.length >= maxResults }; - }, }; async function callBosunTool(runtime, name, rawArgs = {}) { @@ -1193,8 +952,8 @@ export async function startBosunMcpServer(options = {}) { { capabilities: { tools: {} } }, ); - server.setRequestHandler(ResolvedListToolsRequestSchema, async () => handlers.listTools()); - server.setRequestHandler(ResolvedCallToolRequestSchema, async (request) => { + server.setRequestHandler(ListToolsRequestSchema, async () => handlers.listTools()); + server.setRequestHandler(CallToolRequestSchema, async (request) => { const name = String(request.params?.name || "").trim(); return handlers.callTool(name, request.params?.arguments || {}); }); @@ -1242,4 +1001,4 @@ if (isMainModule()) { console.error(`${TAG} failed to start: ${error?.stack || error?.message || error}`); process.exit(1); } -} +} \ No newline at end of file diff --git a/server/playwright-ui-e2e.mjs b/server/playwright-ui-e2e.mjs index 3c1e8891c..e57a2c2be 100644 --- a/server/playwright-ui-e2e.mjs +++ b/server/playwright-ui-e2e.mjs @@ -24,10 +24,7 @@ */ import { readFileSync } from "node:fs"; import { resolve } from "node:path"; -import * as playwrightTest from "@playwright/test"; - -const test = playwrightTest.test ?? 
playwrightTest.default?.test; -const expect = playwrightTest.expect ?? playwrightTest.default?.expect; +import { test, expect } from "@playwright/test"; const ROUTER_SOURCE = readFileSync(resolve(process.cwd(), "ui/modules/router.js"), "utf8"); diff --git a/server/playwright-ui-inspect.mjs b/server/playwright-ui-inspect.mjs index 0a3f21caa..224a56336 100644 --- a/server/playwright-ui-inspect.mjs +++ b/server/playwright-ui-inspect.mjs @@ -5,14 +5,11 @@ * 1. Start the mock UI server: node playwright-ui-server.mjs * 2. Run this: npx playwright test playwright-ui-inspect.mjs --headed */ -import * as playwrightTest from "@playwright/test"; +import { test, expect } from "@playwright/test"; import { resolve, dirname } from "node:path"; import { mkdirSync } from "node:fs"; import { fileURLToPath } from "node:url"; -const test = playwrightTest.test ?? playwrightTest.default?.test; -const expect = playwrightTest.expect ?? playwrightTest.default?.expect; - const __dirname = dirname(fileURLToPath(import.meta.url)); const SCREENSHOTS_DIR = resolve(__dirname, "playwright-screenshots"); mkdirSync(SCREENSHOTS_DIR, { recursive: true }); diff --git a/server/playwright-ui-server.mjs b/server/playwright-ui-server.mjs index 54192a7d0..dc2304ca3 100644 --- a/server/playwright-ui-server.mjs +++ b/server/playwright-ui-server.mjs @@ -10,12 +10,7 @@ import { readFile, writeFile, mkdir } from "node:fs/promises"; import { existsSync } from "node:fs"; import { resolve, extname, dirname } from "node:path"; import { fileURLToPath } from "node:url"; -import * as wsModule from "ws"; - -const WebSocketServer = - wsModule.WebSocketServer ?? - wsModule.default?.WebSocketServer ?? 
- wsModule.default?.Server; +import { WebSocketServer } from "ws"; const __dirname = dirname(fileURLToPath(import.meta.url)); const uiRoot = resolve(__dirname, "..", "ui"); diff --git a/server/playwright-ui-smoke.mjs b/server/playwright-ui-smoke.mjs index 2209cc525..fa447f6c0 100644 --- a/server/playwright-ui-smoke.mjs +++ b/server/playwright-ui-smoke.mjs @@ -1,7 +1,4 @@ -import * as playwrightTest from "@playwright/test"; - -const test = playwrightTest.test ?? playwrightTest.default?.test; -const expect = playwrightTest.expect ?? playwrightTest.default?.expect; +import { test, expect } from "@playwright/test"; const CRITICAL_ROUTES = [ { path: "/", label: "portal home" }, @@ -72,4 +69,4 @@ test.describe("Portal UI smoke", () => { } }); } -}); +}); \ No newline at end of file diff --git a/server/setup-web-server.mjs b/server/setup-web-server.mjs index 580ad3579..027c71048 100644 --- a/server/setup-web-server.mjs +++ b/server/setup-web-server.mjs @@ -17,7 +17,7 @@ import { readFile } from "node:fs/promises"; import { resolve, dirname, extname } from "node:path"; import { fileURLToPath } from "node:url"; import { createRequire } from "node:module"; -import { execSync as nodeExecSync } from "node:child_process"; +import { execSync } from "node:child_process"; import { homedir } from "node:os"; import { ensureTestRuntimeSandbox } from "../infra/test-runtime.mjs"; import { scaffoldSkills } from "../agent/bosun-skills.mjs"; @@ -33,13 +33,6 @@ import { discoverTelegramChats } from "../telegram/get-telegram-chat-id.mjs"; const __dirname = dirname(fileURLToPath(import.meta.url)); -function execSync(command, options = {}) { - return nodeExecSync(command, { - ...options, - windowsHide: options.windowsHide ?? 
(process.platform === "win32"), - }); -} - function trimTrailingSlashes(value) { let out = String(value || ""); while (out.endsWith("/")) out = out.slice(0, -1); @@ -95,11 +88,7 @@ function buildModelsProbeRequest({ apiKey = "", baseUrl = "" } = {}) { return { endpoint: parsed.toString(), headers }; } - if ( - (isAzure && !(lowerPath === "/openai/v1" || lowerPath.startsWith("/openai/v1/"))) - || lowerPath === "/openai" - || lowerPath.startsWith("/openai/") - ) { + if (isAzure || lowerPath === "/openai" || lowerPath.startsWith("/openai/")) { parsed.pathname = "/openai/models"; parsed.search = ""; parsed.searchParams.set("api-version", "2024-10-21"); diff --git a/server/ui-server.mjs b/server/ui-server.mjs index 8af5f3119..38b232467 100644 --- a/server/ui-server.mjs +++ b/server/ui-server.mjs @@ -1,11 +1,9 @@ -import { execSync as nodeExecSync, spawn, spawnSync, exec } from "node:child_process"; -import { Worker } from "node:worker_threads"; -import { randomUUID as _genCallId } from "node:crypto"; +import { execSync, spawn, spawnSync } from "node:child_process"; import * as nodeCrypto from "node:crypto"; import { existsSync, mkdirSync, readFileSync, chmodSync, createWriteStream, createReadStream, writeFileSync, unlinkSync, watchFile, unwatchFile, readdirSync, statSync } from "node:fs"; import { open, readFile, readdir, stat, writeFile } from "node:fs/promises"; -import { createServer, request as httpRequest } from "node:http"; -import { get as httpsGet, request as httpsRequest } from "node:https"; +import { createServer } from "node:http"; +import { get as httpsGet } from "node:https"; import { createServer as createHttpsServer } from "node:https"; import { networkInterfaces, homedir, userInfo as getOsUserInfo } from "node:os"; import { connect as netConnect } from "node:net"; @@ -15,18 +13,10 @@ import { createRequire } from "node:module"; import { arch as osArch, platform as osPlatform } from "node:os"; import { gzip as zlibGzip } from "node:zlib"; import { 
promisify } from "node:util"; -import * as wsModule from "ws"; import Ajv2020 from "ajv/dist/2020.js"; const gzipAsync = promisify(zlibGzip); -function execSync(command, options = {}) { - return nodeExecSync(command, { - ...options, - windowsHide: options.windowsHide ?? (process.platform === "win32"), - }); -} - const { createHash, createHmac, @@ -64,7 +54,6 @@ async function compressAndSend(req, res, statusCode, headers, body) { // Lightweight TTL cache for expensive API responses const _apiCache = new Map(); -const _apiInflight = new Map(); function getCachedApiResponse(key, ttlMs) { const entry = _apiCache.get(key); if (!entry) return undefined; @@ -82,38 +71,12 @@ function invalidateApiCache(prefix) { for (const key of _apiCache.keys()) { if (key.startsWith(prefix)) _apiCache.delete(key); } - for (const key of _apiInflight.keys()) { - if (key.startsWith(prefix)) _apiInflight.delete(key); - } -} -async function getOrComputeCachedApiResponse(key, ttlMs, producer) { - const cached = getCachedApiResponse(key, ttlMs); - if (cached !== undefined) return cached; - - const inflight = _apiInflight.get(key); - if (inflight) return inflight; - - const pending = Promise.resolve() - .then(producer) - .then((value) => { - setCachedApiResponse(key, value); - return value; - }) - .finally(() => { - _apiInflight.delete(key); - }); - - _apiInflight.set(key, pending); - return pending; } // Static file ETag + cache header helper function cacheControlForPath(pathname) { if (pathname.endsWith(".html")) return "no-cache"; - if (/\.(js|mjs|css)$/i.test(pathname)) { - return "no-cache"; - } - if (/\.(svg|png|jpg|jpeg|gif|webp|ico|woff2?)$/i.test(pathname)) { + if (/\.(js|mjs|css|svg|png|jpg|jpeg|gif|webp|ico|woff2?)$/i.test(pathname)) { return "public, max-age=3600, stale-while-revalidate=86400"; } return "public, max-age=300"; @@ -133,10 +96,7 @@ function getLocalLanIp() { } return "localhost"; } -const WebSocketServer = - wsModule.WebSocketServer ?? 
- wsModule.default?.WebSocketServer ?? - wsModule.default?.Server; +import { WebSocketServer } from "ws"; import { getKanbanAdapter, getKanbanBackendName, @@ -182,7 +142,6 @@ import { getManifestPath, scaffoldAgentProfiles, getBosunHomeDir, - hasUnresolvedTemplateTokens, syncAutoDiscoveredLibraryEntries, resolveAgentProfileLibraryMetadata, } from "../infra/library-manager.mjs"; @@ -201,14 +160,6 @@ import { getEnabledHookIds, getHooksAsLibraryEntries, } from "../agent/hook-library.mjs"; -import { - assessInputQuality, - detectRepoGuardrails, - ensureGuardrailsPolicy, - getGuardrailsPolicyPath, - loadGuardrailsPolicy, - saveGuardrailsPolicy, -} from "../infra/guardrails.mjs"; import { listCatalog, getCatalogEntry, @@ -337,9 +288,6 @@ import { resolveTuiAuthToken, } from "../infra/tui-bridge.mjs"; import { setComponentStatus } from "../infra/health-status.mjs"; -import { VaultStore } from "../lib/vault.mjs"; -import { keychainGetOrCreate } from "../lib/vault-keychain.mjs"; -import { INTEGRATIONS } from "../lib/integrations-registry.mjs"; const TASK_STORE_MODULE_PATH = "../task/task-store.mjs"; const TASK_STORE_START_GUARD_EXPORTS = [ @@ -1301,210 +1249,6 @@ function evaluateVoiceToolPolicy({ return { allow: true, statusCode: 200, message: "ok" }; } -// ── Workflow engine Worker-thread proxy ──────────────────────────────────────── -/** - * WorkflowEngineProxy hosts the WorkflowEngine in a dedicated Worker thread so - * that heavy node execution (execFileSync replacements, agent launches, etc.) - * can never block the HTTP / WebSocket event loop in the UI server process. - * - * The proxy mirrors the WorkflowEngine EventEmitter interface so that - * attachWorkflowEngineLiveBridge() works without modification. 
- */ -class WorkflowEngineProxy { - isWorkflowEngineProxy = true; - - constructor() { - this._worker = null; - this._pending = new Map(); // callId → { resolve, reject } - this._listeners = new Map(); // eventName → Set - this._ready = false; - this._initPromise = null; - } - - /** Start the Worker thread and wait for "ready". */ - _start(cfg = {}) { - if (this._initPromise !== null) return this._initPromise; - this._initPromise = new Promise((resolve, reject) => { - const workerPath = fileURLToPath(new URL("./workflow-engine-worker.mjs", import.meta.url)); - this._worker = new Worker(workerPath, { - workerData: cfg, - /* stdout/stderr inherit so worker logs appear in the same console */ - }); - - const failStart = (err) => { - if (!this._worker) { - reject(err); - return; - } - this._worker.off("message", onReady); - this._worker.off("message", onInitError); - reject(err); - }; - - const onReady = (msg) => { - if (!msg || msg.type !== "ready") return; - this._worker.off("message", onReady); - this._worker.off("message", onInitError); - this._ready = true; - resolve(); - }; - const onInitError = (msg) => { - if (!msg || msg.type !== "error" || msg.callId != null) return; - const err = new Error(msg.error || "workflow engine worker failed to initialize"); - if (msg.stack) err.stack = msg.stack; - failStart(err); - }; - this._worker.on("message", onReady); - this._worker.on("message", onInitError); - this._worker.once("error", failStart); - - this._worker.on("message", (msg) => this._onMessage(msg)); - this._worker.on("error", (err) => { - console.error("[wf-worker] worker thread error:", err.message); - }); - this._worker.on("exit", (code) => { - if (code !== 0) console.warn(`[wf-worker] worker exited with code ${code}`); - this._ready = false; - }); - - /* Send init after attaching all listeners */ - this._worker.postMessage({ type: "init", workerData: cfg }); - }); - return this._initPromise; - } - - _onMessage(msg) { - if (!msg || typeof msg.type !== "string") 
return; - - if (msg.type === "result" || msg.type === "error") { - const p = this._pending.get(msg.callId); - if (!p) return; - this._pending.delete(msg.callId); - if (msg.type === "error") { - const err = new Error(msg.error || "workflow engine error"); - if (msg.stack) err.stack = msg.stack; - p.reject(err); - } else { - p.resolve(msg.result); - } - return; - } - - if (msg.type === "event") { - const handlers = this._listeners.get(msg.eventName); - if (handlers) { - for (const h of handlers) { - try { h(msg.payload); } catch { /* best-effort */ } - } - } - return; - } - - if (msg.type === "svc-call") { - /* Service call from worker → execute in main thread and respond */ - this._dispatchServiceCall(msg.callId, msg.method, msg.args || []); - } - } - - async _dispatchServiceCall(callId, method, args) { - try { - const result = await this._executeService(method, args); - this._worker.postMessage({ type: "svc-res", callId, result }); - } catch (err) { - this._worker.postMessage({ type: "svc-res", callId, error: err.message, code: err.code }); - } - } - - async _executeService(method, args) { - /* Dispatch to in-process service functions */ - const [svc, fn] = method.split("."); - switch (svc) { - case "agentPool": { - if (fn === "launchEphemeralThread") return launchEphemeralThread(...args); - if (fn === "launchOrResumeThread") return launchOrResumeThread(...args); - if (fn === "execWithRetry") return execWithRetry(args[0], () => Promise.resolve(), args[2] || {}); - if (fn === "continueSession") { - const [sessionId, prompt, opts = {}] = args; - return launchEphemeralThread(prompt, opts.cwd || process.cwd(), opts.timeout || 3600000, { resumeThreadId: sessionId, sdk: opts.sdk }); - } - if (fn === "killSession") { - try { invalidateThread(args[0]); return true; } catch { return false; } - } - break; - } - case "telegram": { - if (fn === "sendMessage") return sendWorkflowTelegramMessage(args[0], args[1], args[2] || {}); - break; - } - case "kanban": { - const adapter = 
getKanbanAdapter(); - if (!adapter) throw new Error("Kanban adapter not available"); - if (fn === "createTask") return adapter.createTask?.(...args); - if (fn === "updateTaskStatus") return adapter.updateTaskStatus?.(...args); - if (fn === "listTasks") return adapter.listTasks?.(...args); - if (fn === "getTask") return adapter.getTask?.(...args); - break; - } - case "meeting": { - const meetMod = await import("../workflow/meeting-workflow-service.mjs").catch(() => null); - const svc_ = meetMod?.createMeetingWorkflowService?.(); - if (svc_ && typeof svc_[fn] === "function") return svc_[fn](...args); - break; - } - } - throw new Error(`Unknown service call: ${method}`); - } - - /** Proxy a method call to the worker and return a Promise of the result. */ - _call(method, args) { - return new Promise((resolve, reject) => { - if (!this._ready || !this._worker) { - return reject(new Error("Workflow engine worker not ready")); - } - const callId = _genCallId(); - this._pending.set(callId, { resolve, reject }); - this._worker.postMessage({ type: "call", callId, method, args }); - }); - } - - /* ── EventEmitter interface (subset used by attachWorkflowEngineLiveBridge) ── */ - on(eventName, handler) { - let handlers = this._listeners.get(eventName); - if (!handlers) { handlers = new Set(); this._listeners.set(eventName, handlers); } - handlers.add(handler); - return this; - } - off(eventName, handler) { - this._listeners.get(eventName)?.delete(handler); - return this; - } - - /* ── WorkflowEngine API surface ── */ - execute(workflowId, input, opts) { return this._call("execute", [workflowId, input, opts]); } - evaluateTriggers(eventType, payload) { return this._call("evaluateTriggers", [eventType, payload]); } - get(workflowId) { return this._call("get", [workflowId]); } - list(opts) { return this._call("list", [opts]); } - getRunHistory(workflowId, opts) { return this._call("getRunHistory", [workflowId, opts]); } - getRunHistoryPage(workflowId, opts) { return 
this._call("getRunHistoryPage", [workflowId, opts]); } - getRunDetail(runId, opts) { return this._call("getRunDetail", [runId, opts]); } - getRunForensics(runId) { return this._call("getRunForensics", [runId]); } - getNodeForensics(runId, nodeId) { return this._call("getNodeForensics", [runId, nodeId]); } - getRetryOptions(runId) { return this._call("getRetryOptions", [runId]); } - retryRun(runId, opts) { return this._call("retryRun", [runId, opts]); } - restoreFromSnapshot(runId, opts) { return this._call("restoreFromSnapshot", [runId, opts]); } - cancelRun(runId) { return this._call("cancelRun", [runId]); } - createRunSnapshot(runId, opts) { return this._call("createRunSnapshot", [runId, opts]); } - listSnapshots(workflowId) { return this._call("listSnapshots", [workflowId]); } - save(workflow) { return this._call("save", [workflow]); } - import(workflow) { return this._call("import", [workflow]); } - delete(workflowId) { return this._call("delete", [workflowId]); } - getConcurrencyStats() { return this._call("getConcurrencyStats", []); } - getTaskTraceEvents(opts) { return this._call("getTaskTraceEvents", [opts]); } - load() { return this._call("load", []); } - resumeInterruptedRuns() { return this._call("resumeInterruptedRuns", []); } - registerTaskTraceHook() { return null; /* hooks cannot cross thread boundary */ } -} - // ── Workflow engine lazy-loader (module-scope cache) ────────────────────────── let _wfEngine; let _wfNodes; @@ -1851,43 +1595,23 @@ async function getWorkflowEngineModule() { _wfServicesReady = true; if (shouldBootstrapDefaultWorkflowSingleton()) { - /* ── Start the workflow engine Worker thread (complete process decoupling) ── */ - const defaultPaths = getWorkflowStoragePaths(repoRoot); - const proxy = new WorkflowEngineProxy(); - try { - await proxy._start({ - repoRoot, - workflowDir: defaultPaths.workflowDir, - runsDir: defaultPaths.runsDir, + const engine = _wfEngine.getWorkflowEngine({ services }); + 
attachWorkflowEngineLiveBridge(engine); + if (!_wfTaskTraceHookRegistered && typeof engine?.registerTaskTraceHook === "function") { + engine.registerTaskTraceHook((event) => { + handleTaskWorkflowTraceEvent(event); }); - const defaultKey = getWorkflowWorkspaceKey(defaultPaths.workspaceRoot); - _wfEngineByWorkspace.set(defaultKey, proxy); - attachWorkflowEngineLiveBridge(proxy); - console.log("[workflows] Workflow engine Worker thread started"); + _wfTaskTraceHookRegistered = true; + } + // Resume any runs that were interrupted by a previous shutdown. + // This must happen AFTER services are wired so node executors work. + if (typeof engine.resumeInterruptedRuns === "function") { setTimeout(() => { - proxy.resumeInterruptedRuns().catch((err) => { + engine.resumeInterruptedRuns().catch((err) => { console.warn("[workflows] Failed to resume interrupted runs:", err.message); }); }, 0); - } catch (err) { - console.warn("[workflows] Worker thread start failed, falling back to in-process engine:", err.message); - /* Fall back: create engine in-process */ - const engine = _wfEngine.getWorkflowEngine({ services }); - attachWorkflowEngineLiveBridge(engine); - if (!_wfTaskTraceHookRegistered && typeof engine?.registerTaskTraceHook === "function") { - engine.registerTaskTraceHook((event) => { - handleTaskWorkflowTraceEvent(event); - }); - _wfTaskTraceHookRegistered = true; - } - if (typeof engine.resumeInterruptedRuns === "function") { - setTimeout(() => { - engine.resumeInterruptedRuns().catch((err) => { - console.warn("[workflows] Failed to resume interrupted runs:", err.message); - }); - }, 0); - } } } else { _wfRecommendedInstalled = true; @@ -1899,13 +1623,7 @@ async function getWorkflowEngineModule() { if (!_wfRecommendedInstalled && _wfTemplates && shouldBootstrapDefaultWorkflowSingleton()) { try { - const defaultPaths = getWorkflowStoragePaths(repoRoot); - const defaultKey = getWorkflowWorkspaceKey(defaultPaths.workspaceRoot); - const engine = 
_wfEngineByWorkspace.get(defaultKey) || _wfEngine.getWorkflowEngine(); - if (engine?.isWorkflowEngineProxy) { - _wfRecommendedInstalled = true; - return; - } + const engine = _wfEngine.getWorkflowEngine(); attachWorkflowEngineLiveBridge(engine); const selection = resolveWorkflowBootstrapSelection(_wfTemplates); let result = { installed: [], skipped: [], errors: [] }; @@ -2171,34 +1889,17 @@ async function collectWorkflowRunsForTask(taskId, reqUrl, limit = 40) { const out = []; for (const summary of summaries) { if (!summary?.runId) continue; - const summaryTaskIds = Array.isArray(summary?.taskIds) - ? summary.taskIds.map((value) => String(value || "").trim()).filter(Boolean) - : []; - const primaryTaskId = String(summary?.taskId || "").trim(); - let matches = primaryTaskId === normalizedTaskId || summaryTaskIds.includes(normalizedTaskId); - let data = {}; - let traceEvents = []; - if (!matches && engine.getRunDetail) { - const detail = engine.getRunDetail(summary.runId); - if (!detail?.detail) continue; - data = detail.detail?.data || {}; - const detailTaskId = String(data.taskId || data.activeTaskId || data?.task?.id || "").trim(); - matches = detailTaskId === normalizedTaskId; - } + const detail = engine.getRunDetail ? engine.getRunDetail(summary.runId) : null; + if (!detail?.detail) continue; + const data = detail.detail?.data || {}; + const primaryTaskId = String(data.taskId || data.activeTaskId || data?.task?.id || "").trim(); + let matches = primaryTaskId === normalizedTaskId; if (!matches && typeof engine.getTaskTraceEvents === "function") { - traceEvents = engine.getTaskTraceEvents(summary.runId) || []; + const traceEvents = engine.getTaskTraceEvents(summary.runId) || []; matches = traceEvents.some((event) => String(event?.taskId || "").trim() === normalizedTaskId); } if (!matches) continue; const primarySessionId = (() => { - for (const value of [ - summary?.primarySessionId, - summary?.sessionId, - ...(Array.isArray(summary?.sessionIds) ? 
summary.sessionIds : []), - ]) { - const normalized = String(value || "").trim(); - if (normalized) return normalized; - } for (const value of [ data.sessionId, data.threadId, @@ -2208,6 +1909,9 @@ async function collectWorkflowRunsForTask(taskId, reqUrl, limit = 40) { const normalized = String(value || "").trim(); if (normalized) return normalized; } + const traceEvents = typeof engine.getTaskTraceEvents === "function" + ? engine.getTaskTraceEvents(summary.runId) || [] + : []; for (let index = traceEvents.length - 1; index >= 0; index -= 1) { const event = traceEvents[index]; for (const value of [ @@ -2223,17 +1927,17 @@ async function collectWorkflowRunsForTask(taskId, reqUrl, limit = 40) { return null; })(); out.push({ - runId: summary.runId, - workflowId: summary.workflowId, - workflowName: summary.workflowName, - status: summary.status, - outcome: summary.status, - summary: summary.status === "failed" - ? `Workflow run failed (${summary.workflowName || summary.workflowId || summary.runId})` - : `Workflow run ${summary.status || "completed"} (${summary.workflowName || summary.workflowId || summary.runId})`, - startedAt: summary.startedAt || null, - endedAt: summary.endedAt || null, - duration: summary.duration || null, + runId: detail.runId, + workflowId: detail.workflowId, + workflowName: detail.workflowName, + status: detail.status, + outcome: detail.status, + summary: detail.status === "failed" + ? 
`Workflow run failed (${detail.workflowName || detail.workflowId || detail.runId})` + : `Workflow run ${detail.status || "completed"} (${detail.workflowName || detail.workflowId || detail.runId})`, + startedAt: detail.startedAt || null, + endedAt: detail.endedAt || null, + duration: detail.duration || null, sessionId: null, primarySessionId, source: "workflow", @@ -2257,48 +1961,6 @@ function sanitizeTaskDiagnosticText(value, maxLength = 240) { return `${normalized.slice(0, Math.max(0, maxLength - 1)).trimEnd()}…`; } -const TASK_LOG_DIAGNOSTICS_TAIL_BYTES = 256 * 1024; -const TASK_LOG_DIAGNOSTICS_CACHE_MS = 5000; - -function createEmptyTaskLogDiagnostics() { - return { - counts: { - prePrValidationFailed: 0, - worktreeFailed: 0, - blockedTransitions: 0, - createPrFailed: 0, - }, - entries: [], - }; -} - -async function readLogTailChunk(filePath, maxBytes = TASK_LOG_DIAGNOSTICS_TAIL_BYTES) { - const handle = await open(filePath, "r"); - try { - const info = await handle.stat(); - const size = Number(info?.size || 0); - if (!Number.isFinite(size) || size <= 0) return ""; - const length = Math.max(1, Math.min(size, Math.max(1, Math.trunc(maxBytes)))); - const offset = Math.max(0, size - length); - const buffer = Buffer.alloc(length); - await handle.read(buffer, 0, length, offset); - let text = buffer.toString("utf8"); - if (offset > 0) { - const firstNewline = text.indexOf("\n"); - if (firstNewline >= 0) { - text = text.slice(firstNewline + 1); - } - } - return text; - } finally { - try { - await handle.close(); - } catch { - // best effort - } - } -} - function collectTaskTimelineDiagnostics(task, limit = 8) { const timeline = Array.isArray(task?.timeline) ? 
task.timeline : []; const relevant = []; @@ -2332,17 +1994,22 @@ function collectTaskTimelineDiagnostics(task, limit = 8) { return relevant.slice(-Math.max(1, limit)); } -async function collectTaskLogDiagnostics(task, workspaceDir = "", limit = 8) { +function collectTaskLogDiagnostics(task, workspaceDir = "", limit = 8) { const taskId = String(task?.id || task?.taskId || "").trim(); if (!taskId) { - return createEmptyTaskLogDiagnostics(); + return { + counts: { + prePrValidationFailed: 0, + worktreeFailed: 0, + blockedTransitions: 0, + createPrFailed: 0, + }, + entries: [], + }; } const taskBranch = String(task?.branch || task?.branchName || "").trim(); const needles = [taskId, taskBranch].filter((value) => value && value.length >= 8); - if (needles.length === 0) { - return createEmptyTaskLogDiagnostics(); - } const logPaths = []; const pushLogPath = (candidate) => { if (!candidate || !existsSync(candidate) || logPaths.includes(candidate)) return; @@ -2355,69 +2022,57 @@ async function collectTaskLogDiagnostics(task, workspaceDir = "", limit = 8) { pushLogPath(resolve(repoRoot, ".bosun", "logs", "monitor-error.log")); pushLogPath(resolve(repoRoot, ".bosun", "logs", "monitor.log")); - const cacheKey = `task-log-diagnostics:${taskId}:${taskBranch}:${normalizeCandidatePath(workspaceDir) || repoRoot}`; - return getOrComputeCachedApiResponse(cacheKey, TASK_LOG_DIAGNOSTICS_CACHE_MS, async () => { - const counts = { - prePrValidationFailed: 0, - worktreeFailed: 0, - blockedTransitions: 0, - createPrFailed: 0, - }; - const entries = []; - const logChunks = await Promise.all( - logPaths.map(async (logPath) => { - try { - return { - logPath, - raw: await readLogTailChunk(logPath), - }; - } catch { - return null; - } - }), - ); + const counts = { + prePrValidationFailed: 0, + worktreeFailed: 0, + blockedTransitions: 0, + createPrFailed: 0, + }; + const entries = []; - for (const chunk of logChunks) { - if (!chunk?.raw) continue; - const logName = 
/monitor-error\.log$/i.test(chunk.logPath) - ? "monitor-error.log" - : "monitor.log"; - for (const line of chunk.raw.split(/\r?\n/)) { - if (!line) continue; - if (!needles.some((needle) => line.includes(needle))) continue; - const text = sanitizeTaskDiagnosticText(line, 320); - let matched = false; - if (/pre-PR validation failed/i.test(text)) { - counts.prePrValidationFailed += 1; - matched = true; - } - if (/Worktree failed for/i.test(text) || /Worktree acquisition failed/i.test(text)) { - counts.worktreeFailed += 1; - matched = true; - } - if (/-> blocked/i.test(text) || /status: .*blocked/i.test(text)) { - counts.blockedTransitions += 1; - matched = true; - } - if (/create-pr FAILED/i.test(text)) { - counts.createPrFailed += 1; - matched = true; - } - if (!matched) continue; - entries.push({ - source: logName, - message: text, - kind: "log", - }); - if (entries.length > limit) entries.shift(); - } + for (const logPath of logPaths) { + let raw = ""; + try { + raw = readFileSync(logPath, "utf8"); + } catch { + continue; } + const logName = /monitor-error\.log$/i.test(logPath) ? 
"monitor-error.log" : "monitor.log"; + for (const line of raw.split(/\r?\n/)) { + if (!line) continue; + if (!needles.some((needle) => line.includes(needle))) continue; + const text = sanitizeTaskDiagnosticText(line, 320); + let matched = false; + if (/pre-PR validation failed/i.test(text)) { + counts.prePrValidationFailed += 1; + matched = true; + } + if (/Worktree failed for/i.test(text) || /Worktree acquisition failed/i.test(text)) { + counts.worktreeFailed += 1; + matched = true; + } + if (/-> blocked/i.test(text) || /status: .*blocked/i.test(text)) { + counts.blockedTransitions += 1; + matched = true; + } + if (/create-pr FAILED/i.test(text)) { + counts.createPrFailed += 1; + matched = true; + } + if (!matched) continue; + entries.push({ + source: logName, + message: text, + kind: "log", + }); + if (entries.length > limit) entries.shift(); + } + } - return { counts, entries }; - }); + return { counts, entries }; } -async function buildTaskBlockedContext(task, options = {}) { +function buildTaskBlockedContext(task, options = {}) { const currentTask = task && typeof task === "object" ? task : {}; const canStart = options.canStart && typeof options.canStart === "object" ? options.canStart @@ -2437,7 +2092,7 @@ async function buildTaskBlockedContext(task, options = {}) { ? 
currentTask.workflowRuns : []; const timelineEvidence = collectTaskTimelineDiagnostics(currentTask, 6); - const logDiagnostics = await collectTaskLogDiagnostics( + const logDiagnostics = collectTaskLogDiagnostics( currentTask, normalizeCandidatePath(options.workspaceDir), 6, @@ -2597,38 +2252,22 @@ async function getWorkflowRequestContext(reqUrl, options = {}) { if (_testDefaultEngine) { engine = _testDefaultEngine; } else { - const preferInProcessEngine = Boolean(process.env.VITEST); - if (!preferInProcessEngine) { - /* Use Worker thread proxy for complete decoupling from the HTTP event loop */ - const proxy = new WorkflowEngineProxy(); - try { - await proxy._start({ - repoRoot, - workflowDir: paths.workflowDir, - runsDir: paths.runsDir, - }); - engine = proxy; - } catch (startErr) { - console.warn("[workflows] Worker thread unavailable, using in-process engine:", startErr.message); - } - } - if (!engine) { - engine = new wfMod.WorkflowEngine({ - workflowDir: paths.workflowDir, - runsDir: paths.runsDir, - detectInterruptedRuns: false, - services: _wfServices || {}, - onTaskWorkflowEvent: handleTaskWorkflowTraceEvent, + engine = new wfMod.WorkflowEngine({ + workflowDir: paths.workflowDir, + runsDir: paths.runsDir, + detectInterruptedRuns: false, + services: _wfServices || {}, + onTaskWorkflowEvent: handleTaskWorkflowTraceEvent, + }); + attachWorkflowEngineLiveBridge(engine); + if (typeof engine.registerTaskTraceHook === "function") { + engine.registerTaskTraceHook((event) => { + handleTaskWorkflowTraceEvent(event); }); - if (typeof engine.registerTaskTraceHook === "function") { - engine.registerTaskTraceHook((event) => { - handleTaskWorkflowTraceEvent(event); - }); - } - engine.load(); } - attachWorkflowEngineLiveBridge(engine); + engine.load(); } + attachWorkflowEngineLiveBridge(engine); _wfEngineByWorkspace.set(workspaceKey, engine); } if (options.bootstrapTemplates !== false) { @@ -2714,7 +2353,7 @@ function collectTaskLinkedSessionIds(task, tracker = null) { 
const taskKeySet = new Set(taskKeys); const sessionTracker = tracker || getSessionTracker(); const sessions = typeof sessionTracker?.listAllSessions === "function" - ? sessionTracker.listAllSessions({ includePersisted: true }) + ? sessionTracker.listAllSessions() : []; for (const session of sessions) { const sessionTaskId = normalizeDiffTaskRef(session?.taskId); @@ -2972,75 +2611,6 @@ function taskMatchesWorkspaceContext(task, workspaceContext) { ); } -function resolveWorkspaceFleetConfig(workspaceContext = {}) { - const configDir = resolveUiConfigDir(); - if (!configDir) return null; - const listed = listManagedWorkspaces(configDir, { repoRoot }); - const workspaceId = String(workspaceContext?.workspaceId || "").trim().toLowerCase(); - const workspace = - (workspaceId - ? listed.find((entry) => String(entry?.id || "").trim().toLowerCase() === workspaceId) - : null) || - getActiveManagedWorkspace(configDir) || - listed[0] || - null; - return workspace && typeof workspace === "object" ? workspace : null; -} - -function buildWorkspaceExecutorSummary(execStatus, workspaceContext) { - if (!execStatus || typeof execStatus !== "object" || !workspaceContext) return null; - const allSlots = Array.isArray(execStatus.slots) - ? execStatus.slots.map((slot, slotIndex) => ({ ...slot, slotIndex })) - : []; - const workspace = resolveWorkspaceFleetConfig(workspaceContext); - const executors = - workspace?.executors && typeof workspace.executors === "object" - ? workspace.executors - : null; - const configuredSlots = Math.max(0, Number(executors?.maxConcurrent || 0) || 0); - const globalMaxParallel = Math.max( - 0, - Number(execStatus.maxParallel || 0) || allSlots.length, - ); - const taskIndex = new Map(); - for (const task of getAllInternalTasks()) { - const taskId = String(task?.id || task?.taskId || "").trim(); - if (taskId) taskIndex.set(taskId, task); - } - const slots = workspaceContext.allWorkspaces - ? 
allSlots - : allSlots.filter((slot) => { - const taskId = String(slot?.taskId || "").trim(); - const task = taskIndex.get(taskId) || { id: taskId, workspace: "", meta: {} }; - return taskMatchesWorkspaceContext(task, workspaceContext); - }); - const activeSlots = slots.filter((slot) => { - const status = String(slot?.status || "").trim().toLowerCase(); - return status === "running" || status === "busy"; - }).length; - const maxParallel = configuredSlots > 0 - ? configuredSlots - : Math.max(globalMaxParallel, slots.length, activeSlots); - return { - workspaceId: String(workspaceContext.workspaceId || "").trim(), - workspaceDir: normalizeCandidatePath(workspaceContext.workspaceDir) || repoRoot, - workspaceRoot: - normalizeCandidatePath(workspaceContext.workspaceRoot) - || normalizeCandidatePath(workspaceContext.workspaceDir) - || repoRoot, - workspaceName: String(workspace?.name || workspace?.id || workspaceContext.workspaceId || "All workspaces").trim(), - pool: String(executors?.pool || "shared").trim() || "shared", - configuredSlots, - maxParallel, - activeSlots, - freeSlots: Math.max(0, maxParallel - activeSlots), - capacityPct: maxParallel > 0 ? Math.round((activeSlots / maxParallel) * 100) : 0, - globalMaxParallel, - globalActiveSlots: Math.max(0, Number(execStatus.activeSlots || 0) || 0), - slots, - }; -} - async function listTasksForWorkspaceContext(workspaceContext, { status = "", projectId = "" } = {}) { const adapter = getKanbanAdapter(); const projects = await adapter.listProjects(); @@ -3072,41 +2642,30 @@ async function collectBenchmarkWorkflowRuns(reqUrl, taskIds = new Set(), limit = const runs = []; for (const summary of summaries) { if (!summary?.runId) continue; - const summaryTaskIds = Array.isArray(summary?.taskIds) - ? summary.taskIds.map((value) => String(value || "").trim()).filter(Boolean) - : []; - const primaryTaskId = String(summary?.taskId || "").trim(); + const detail = wfCtx.engine.getRunDetail ? 
wfCtx.engine.getRunDetail(summary.runId) : null; + if (!detail?.detail) continue; + const data = detail.detail?.data || {}; + const primaryTaskId = String( + data.taskId || data.activeTaskId || data?.task?.id || "", + ).trim(); let matches = Boolean(primaryTaskId && taskIds.has(primaryTaskId)); - if (!matches && summaryTaskIds.length > 0) { - matches = summaryTaskIds.some((taskId) => taskIds.has(taskId)); - } - if (!matches && typeof wfCtx.engine.getRunDetail === "function") { - const detail = wfCtx.engine.getRunDetail(summary.runId); - if (detail?.detail) { - const data = detail.detail?.data || {}; - const detailTaskId = String( - data.taskId || data.activeTaskId || data?.task?.id || "", - ).trim(); - matches = Boolean(detailTaskId && taskIds.has(detailTaskId)); - } - } if (!matches && typeof wfCtx.engine.getTaskTraceEvents === "function") { const traceEvents = wfCtx.engine.getTaskTraceEvents(summary.runId) || []; matches = traceEvents.some((event) => taskIds.has(String(event?.taskId || "").trim())); } if (!matches) continue; runs.push({ - runId: summary.runId, - workflowId: summary.workflowId, - workflowName: summary.workflowName, - status: summary.status, - startedAt: summary.startedAt || null, - endedAt: summary.endedAt || null, - duration: summary.duration || null, + runId: detail.runId, + workflowId: detail.workflowId, + workflowName: detail.workflowName, + status: detail.status, + startedAt: detail.startedAt || null, + endedAt: detail.endedAt || null, + duration: detail.duration || null, summary: - summary.status === "failed" - ? `Workflow run failed (${summary.workflowName || summary.workflowId || summary.runId})` - : `Workflow run ${summary.status || "completed"} (${summary.workflowName || summary.workflowId || summary.runId})`, + detail.status === "failed" + ? 
`Workflow run failed (${detail.workflowName || detail.workflowId || detail.runId})` + : `Workflow run ${detail.status || "completed"} (${detail.workflowName || detail.workflowId || detail.runId})`, }); if (runs.length >= limit) break; } @@ -4424,7 +3983,6 @@ function normalizeCandidatePath(input) { if (!input) return ""; const raw = String(input).trim(); if (!raw) return ""; - if (hasUnresolvedTemplateTokens(raw)) return ""; try { return resolve(raw); } catch { @@ -5331,7 +4889,7 @@ async function buildWorktreePeek(wt) { const tracker = getSessionTracker(); const sessions = tracker - .listAllSessions({ includePersisted: false }) + .listAllSessions() .filter((s) => s.taskId === wt.taskKey || s.id === wt.taskKey); return { @@ -5597,8 +5155,6 @@ const UI_INSTANCE_LOCK_FILE = "ui-server.instance.lock.json"; const UI_SESSION_TOKEN_FILE = "ui-session-token.json"; const TUI_SESSION_TOKEN_FILE = "ui-token"; const UI_LAST_PORT_FILE = "ui-last-port.json"; -const DEFAULT_UI_INSTANCE_PROBE_TIMEOUT_MS = 1500; -const DEFAULT_UI_INSTANCE_STALE_GRACE_MS = 2 * 60 * 1000; const DEFAULT_AUTO_OPEN_COOLDOWN_MS = 12 * 60 * 60 * 1000; // 12h const DEFAULT_SESSION_TOKEN_TTL_MS = 30 * 24 * 60 * 60 * 1000; // 30 days const wsClients = new Set(); @@ -5674,7 +5230,7 @@ let _activeSessions = []; function getLiveSessionSnapshot({ includeHidden = false } = {}) { const tracker = getSessionTracker(); - let sessions = tracker.listAllSessions({ includePersisted: false }); + let sessions = tracker.listAllSessions(); if (!includeHidden) { sessions = sessions.filter((session) => { const detailed = tracker.getSessionById(session.id) || session; @@ -5744,10 +5300,6 @@ function resolveUiConfigDir() { try { mkdirSync(fromConfigPath, { recursive: true }); } catch { /* ok */ } return fromConfigPath; } - if (sandbox && process.env.BOSUN_TEST_ALLOW_REPO_LOCAL_CONFIG !== "1") { - try { mkdirSync(sandbox.configDir, { recursive: true }); } catch { /* ok */ } - return sandbox.configDir; - } if 
(String(process.env.REPO_ROOT || "").trim()) { const repoLocalConfigDirCandidates = [ resolve(repoRoot, ".bosun"), @@ -5872,113 +5424,7 @@ function writeUiInstanceLock(path, payload = {}) { } } -function clearUiInstanceLockFile(path) { - try { - if (path && existsSync(path)) unlinkSync(path); - } catch { - // best effort - } -} - -function describeUiInstanceTarget(payload = {}) { - return payload?.url - || (payload?.port - ? `${payload?.protocol || "http"}://${payload?.host || "127.0.0.1"}:${payload.port}` - : "unknown"); -} - -function normalizeUiProbeHost(host) { - const normalized = String(host || "").trim().toLowerCase(); - if ( - !normalized || - normalized === "0.0.0.0" || - normalized === "::" || - normalized === "[::]" || - normalized === "::0" - ) { - return "127.0.0.1"; - } - return normalized === "localhost" ? "127.0.0.1" : normalized; -} - -async function probeUiHealth(urlText, timeoutMs = DEFAULT_UI_INSTANCE_PROBE_TIMEOUT_MS) { - try { - const parsed = new URL(String(urlText || "")); - const isHttps = parsed.protocol === "https:"; - const requestImpl = isHttps ? httpsRequest : httpRequest; - const host = normalizeUiProbeHost(parsed.hostname); - return await new Promise((resolveProbe) => { - let settled = false; - const settle = (value) => { - if (settled) return; - settled = true; - resolveProbe(Boolean(value)); - }; - const req = requestImpl( - { - protocol: isHttps ? "https:" : "http:", - host, - port: parsed.port ? Number(parsed.port) : (isHttps ? 
443 : 80), - path: "/healthz", - method: "GET", - timeout: Math.max(100, Math.trunc(Number(timeoutMs) || DEFAULT_UI_INSTANCE_PROBE_TIMEOUT_MS)), - }, - (res) => { - res.resume(); - settle(Number(res.statusCode || 0) >= 200 && Number(res.statusCode || 0) < 500); - }, - ); - req.on("error", () => settle(false)); - req.on("timeout", () => { - try { req.destroy(new Error("timeout")); } catch { /* best effort */ } - settle(false); - }); - req.end(); - }); - } catch { - return false; - } -} - -async function isUiInstanceResponsive(existing, { probeTimeoutMs } = {}) { - const port = Number(existing?.port || 0); - if (!Number.isFinite(port) || port <= 0 || port > 65535) return false; - const protocol = String(existing?.protocol || "").trim().toLowerCase() === "https" ? "https" : "http"; - const candidateUrls = []; - if (existing?.url) candidateUrls.push(String(existing.url)); - const host = normalizeUiProbeHost(existing?.host); - candidateUrls.push(`${protocol}://${host}:${port}`); - if (host !== "127.0.0.1") { - candidateUrls.push(`${protocol}://127.0.0.1:${port}`); - } - for (const candidate of new Set(candidateUrls.filter(Boolean))) { - if (await probeUiHealth(candidate, probeTimeoutMs)) return true; - } - return false; -} - -async function shouldTreatUiInstanceLockAsStale( - existing, - { - staleGraceMs = DEFAULT_UI_INSTANCE_STALE_GRACE_MS, - probeTimeoutMs = DEFAULT_UI_INSTANCE_PROBE_TIMEOUT_MS, - } = {}, -) { - if (!existing || existing.pid === process.pid) return false; - if (!isPidRunning(existing.pid)) return true; - const startedAt = Number(existing.startedAt || 0); - const ageMs = startedAt > 0 ? 
Math.max(0, Date.now() - startedAt) : Number.POSITIVE_INFINITY; - if (ageMs < Math.max(0, Math.trunc(Number(staleGraceMs) || DEFAULT_UI_INSTANCE_STALE_GRACE_MS))) { - return false; - } - return !(await isUiInstanceResponsive(existing, { probeTimeoutMs })); -} - -async function tryAcquireUiInstanceLock({ - preferredPort = 0, - staleGraceMs = DEFAULT_UI_INSTANCE_STALE_GRACE_MS, - probeTimeoutMs = DEFAULT_UI_INSTANCE_PROBE_TIMEOUT_MS, -} = {}) { +function tryAcquireUiInstanceLock({ preferredPort = 0 } = {}) { const lockPath = resolveUiInstanceLockPath(); uiInstanceLockPath = lockPath; const payload = { @@ -6010,15 +5456,14 @@ async function tryAcquireUiInstanceLock({ const current = readUiInstanceLock(lockPath); if (current && current.pid !== process.pid && isPidRunning(current.pid)) { - if (!(await shouldTreatUiInstanceLockAsStale(current, { staleGraceMs, probeTimeoutMs }))) { - return { ok: false, existing: current }; - } - console.warn( - `[telegram-ui] stale ui runtime lock detected (pid=${current.pid}) — reclaiming ${describeUiInstanceTarget(current)}`, - ); + return { ok: false, existing: current }; } - clearUiInstanceLockFile(lockPath); + try { + if (existsSync(lockPath)) unlinkSync(lockPath); + } catch { + // best effort + } try { return tryCreateLock(); @@ -6201,40 +5646,19 @@ function ensureSessionToken() { } function readLastUiPort() { - return readLastUiPortRecord()?.port || 0; -} - -function readLastUiPortRecord() { try { const portPath = resolveUiCachePath(UI_LAST_PORT_FILE); - if (!existsSync(portPath)) return null; + if (!existsSync(portPath)) return 0; const payload = JSON.parse(readFileSync(portPath, "utf8")); const port = Number(payload?.port || 0); - if (!Number.isFinite(port) || port <= 0 || port > 65535) return null; - return { - port: Math.trunc(port), - updatedAt: Number(payload?.updatedAt || 0) || 0, - pid: Number(payload?.pid || 0) || 0, - host: String(payload?.host || ""), - protocol: String(payload?.protocol || ""), - url: 
String(payload?.url || ""), - configDir: String(payload?.configDir || ""), - }; - } catch { - return null; - } -} - -function clearLastUiPort() { - try { - const portPath = resolveUiCachePath(UI_LAST_PORT_FILE); - if (existsSync(portPath)) unlinkSync(portPath); + if (!Number.isFinite(port) || port <= 0 || port > 65535) return 0; + return Math.trunc(port); } catch { - // best effort + return 0; } } -function persistLastUiPort(port, metadata = {}) { +function persistLastUiPort(port) { const normalized = Number(port || 0); if (!Number.isFinite(normalized) || normalized <= 0 || normalized > 65535) { return; @@ -6248,10 +5672,6 @@ function persistLastUiPort(port, metadata = {}) { port: Math.trunc(normalized), updatedAt: Date.now(), pid: process.pid, - host: String(metadata.host || ""), - protocol: String(metadata.protocol || ""), - url: String(metadata.url || ""), - configDir: resolveUiConfigDir(), }, null, 2, @@ -6963,154 +6383,11 @@ function normalizeGatesPolicy(raw = {}, options = {}) { }; } -function hasGuardrailsOverride(input = {}) { - if (!input || typeof input !== "object") return false; - return [ - input?.guardrailsOverride, - input?.overrideGuardrails, - input?.INPUTOverride, - input?.guardrails?.override, - input?.override?.guardrails, - ].some((value) => parseBooleanLike(value, false) === true); -} - -function resolveRequireReviewGuardrail() { - const explicit = process.env.BOSUN_FLOW_REQUIRE_REVIEW; - if (explicit !== undefined && String(explicit).trim() !== "") { - return parseBooleanLike(explicit, true); - } - return true; -} - -function resolvePreflightGuardrail(configData = {}) { - if (configData?.preflightEnabled !== undefined) { - return parseBooleanLike(configData.preflightEnabled, true); - } - return true; -} - -function buildHookGuardrailsOverview(rootDir) { - const catalog = getHookCatalog(); - const coreHooks = getCoreHooks(); - const defaultHooks = getDefaultHooks(); - const persistedState = loadHookState(rootDir); - const explicitStateKeys = 
Object.keys(persistedState?.enabled || {}); - const effectiveEnabledIds = explicitStateKeys.length > 0 - ? new Set(getEnabledHookIds(rootDir)) - : new Set(defaultHooks.map((hook) => hook.id)); - - return { - total: catalog.length, - coreCount: coreHooks.length, - defaultCount: defaultHooks.length, - enabledCount: effectiveEnabledIds.size, - enabledIds: [...effectiveEnabledIds].sort(), - hasPersistedState: explicitStateKeys.length > 0, - updatedAt: persistedState?.updatedAt || null, - categories: getHookCategories().map((category) => ({ - ...category, - enabledCount: catalog.filter((hook) => hook.category === category.id && effectiveEnabledIds.has(hook.id)).length, - })), - }; -} - -function buildTaskGuardrailsInput(body = {}, context = {}) { - return { - title: body?.title, - description: body?.description, - metadata: { - project: context.projectId || body?.project || "", - workspace: context.workspace || body?.workspace || "", - repository: context.repository || body?.repository || "", - repositories: context.repositories || body?.repositories || [], - priority: body?.priority || "", - status: body?.status || "", - type: body?.type || "", - tags: context.tags || body?.tags || [], - ...context.metadataTopLevel, - ...context.metadata, - ...(body?.meta && typeof body.meta === "object" ? body.meta : {}), - ...(body?.metadata && typeof body.metadata === "object" ? body.metadata : {}), - }, - }; -} - -function buildManualFlowGuardrailsInput(template, templateId, formValues = {}, executionContext = {}) { - return { - title: template?.name || templateId || "manual-flow", - description: template?.description || "", - metadata: { - templateId, - category: template?.category || "", - tags: Array.isArray(template?.tags) ? 
template.tags : [], - executionContext, - }, - formValues, - }; -} - -function buildGuardrailsSnapshot() { - const workspaceContext = resolveActiveWorkspaceExecutionContext(); - const workspaceDir = String(workspaceContext?.workspaceDir || repoRoot).trim() || repoRoot; - const { configData } = readConfigDocument(); - const guardrailsPolicy = ensureGuardrailsPolicy(workspaceDir); - const hooks = buildHookGuardrailsOverview(workspaceDir); - const repoGuardrails = detectRepoGuardrails(workspaceDir); - const runtime = { - preflightEnabled: resolvePreflightGuardrail(configData), - requireReview: resolveRequireReviewGuardrail(), - gates: normalizeGatesPolicy(configData?.gates, { - worktreeBootstrap: configData?.worktreeBootstrap, - }), - prAutomation: normalizePrAutomationPolicy(configData?.prAutomation, { includeOAuthTrustedAuthor: true }), - }; - - const warnings = []; - if (!runtime.preflightEnabled) warnings.push("Preflight checks are disabled."); - if (!runtime.requireReview) warnings.push("Review requirement is disabled."); - if (!guardrailsPolicy.INPUT.enabled) warnings.push("INPUT enforcement is disabled."); - if (!guardrailsPolicy.push.workflowOnly) warnings.push("Workflow-only push ownership is disabled."); - if (!guardrailsPolicy.push.blockAgentPushes) warnings.push("Agents are allowed to push directly."); - if (!guardrailsPolicy.push.requireManagedPrePush) warnings.push("Managed worktree pre-push validation is not required."); - if (!repoGuardrails.categories.prepush.detected) warnings.push("No prepush package script detected."); - if (!repoGuardrails.categories.ci.detected) warnings.push("No CI-like package scripts detected."); - - return { - workspace: { - workspaceId: workspaceContext?.workspaceId || "", - workspaceDir, - workspaceRoot: workspaceContext?.workspaceRoot || workspaceDir, - }, - summary: { - status: warnings.length === 0 ? "guarded" : warnings.length <= 2 ? 
"partial" : "needs-attention", - counts: { - hooksEnabled: hooks.enabledCount, - hooksTotal: hooks.total, - repoGuardrailsDetected: repoGuardrails.detectedCount, - runtimeEnabled: Number(runtime.preflightEnabled) + Number(runtime.requireReview), - INPUTEnabled: guardrailsPolicy.INPUT.enabled ? 1 : 0, - }, - warnings, - }, - hooks, - runtime, - repoGuardrails, - INPUT: { - policyPath: getGuardrailsPolicyPath(workspaceDir), - policy: guardrailsPolicy.INPUT, - }, - push: { - policyPath: getGuardrailsPolicyPath(workspaceDir), - policy: guardrailsPolicy.push, - }, - }; -} - -function validateConfigSchemaChanges(changes) { - try { - const schema = getConfigSchema(); - const validator = getConfigValidator(); - if (!schema || !validator) return {}; +function validateConfigSchemaChanges(changes) { + try { + const schema = getConfigSchema(); + const validator = getConfigValidator(); + if (!schema || !validator) return {}; const configPath = resolveConfigPath(); let configData = {}; @@ -8307,7 +7584,6 @@ async function spawnCloudflared(cfBin, args, maxRetries = 3) { return spawn(cfBin, args, { stdio: ["ignore", "pipe", "pipe"], detached: false, - windowsHide: true, }); } catch (err) { if (err.code === "ETXTBSY" && attempt < maxRetries) { @@ -8976,7 +8252,7 @@ function scrubStackTraces(payload) { return out; } function normalizeJsonResponsePayload(payload) { - return makeJsonSafe(scrubStackTraces(payload), { maxDepth: 12 }); + return makeJsonSafe(scrubStackTraces(payload), { maxDepth: 6 }); } function makeJsonSafe(value, options = {}) { @@ -10682,25 +9958,17 @@ function broadcastCanonicalEvent(channels, type, payload = {}) { payload, ts: Date.now(), }; - /* Yield the event loop between sends so HTTP requests aren't starved during - workflow event bursts (e.g. 20 PRs × many node transitions). 
*/ - if (wsClients.size > 0) { - setImmediate(() => { - for (const socket of wsClients) { - const subscribed = socket.__channels || new Set(["*"]); - const shouldSend = subscribed.has("*") || Array.from(required).some((channel) => subscribed.has(channel)); - if (shouldSend) sendWsMessage(socket, message); - } - }); + for (const socket of wsClients) { + const subscribed = socket.__channels || new Set(["*"]); + const shouldSend = subscribed.has("*") || Array.from(required).some((channel) => subscribed.has(channel)); + if (shouldSend) sendWsMessage(socket, message); } } function getCurrentSessionSnapshot() { try { const tracker = getSessionTracker(); - return buildSessionsUpdatePayload( - tracker?.listAllSessions?.({ includePersisted: false }) || [], - ); + return buildSessionsUpdatePayload(tracker?.listAllSessions?.() || []); } catch { return []; } @@ -12399,33 +11667,6 @@ async function readStatusSnapshot() { } } -async function readUiWorktreeRecovery() { - const snapshot = await readStatusSnapshot(); - if (snapshot && typeof snapshot === "object" && snapshot.worktreeRecovery) { - return snapshot.worktreeRecovery; - } - return readWorktreeRecoveryState(repoRoot); -} - -/** - * Non-blocking async shell exec for git/gh calls inside HTTP request handlers. - * Prevents execSync/spawnSync from blocking the event loop and starving other requests. 
- */ -function execAsync(cmd, { cwd, timeout = 10_000, encoding = "utf8" } = {}) { - return new Promise((resolve, reject) => { - const child = exec(cmd, { cwd, timeout, encoding, windowsHide: true }); - let stdout = ""; - let stderr = ""; - child.stdout?.on("data", (d) => { stdout += d; }); - child.stderr?.on("data", (d) => { stderr += d; }); - child.on("close", (code) => { - if (code === 0 || code === null) resolve(stdout); - else reject(Object.assign(new Error(`exec failed (${code}): ${cmd}`), { stdout, stderr, exitCode: code })); - }); - child.on("error", reject); - }); -} - function runGit(args, timeoutMs = 10000) { const argList = Array.isArray(args) ? args @@ -12911,18 +12152,7 @@ function withinDays(entry, days) { async function readCompletedSessionEntries(maxLines = 100_000) { // Check multiple candidate paths — repoRoot may be the monorepo root // while data lives under the bosun subdirectory. - // When REPO_ROOT is not explicitly set, OR when repoRoot resolves to a - // workspace clone (e.g. .bosun/workspaces//bosun), the module-relative - // path is added first so we find the data written by task-executor - // (__dirname-relative) even when resolveRepoRoot() returns a workspace clone - // path instead of the module root. - const repoRootNorm = resolve(repoRoot).replace(/\\/g, "/"); - const isWorkspaceClone = repoRootNorm.includes("/.bosun/workspaces/"); - const useModuleRelative = !process.env.REPO_ROOT || isWorkspaceClone; const candidates = [ - ...(useModuleRelative - ? 
[resolve(__dirname, "..", ".cache", "session-accumulator.jsonl")] - : []), resolve(repoRoot, ".cache", "session-accumulator.jsonl"), resolve(repoRoot, "bosun", ".cache", "session-accumulator.jsonl"), ]; @@ -13113,12 +12343,6 @@ async function buildUsageAnalytics(days) { const dailySkills = {}; /** dailyMcp[date][tool] = count */ const dailyMcp = {}; - /** dailyInputTokens[date] = total input tokens */ - const dailyInputTokens = {}; - /** dailyOutputTokens[date] = total output tokens */ - const dailyOutputTokens = {}; - /** dailyTotalTokens[date] = total tokens */ - const dailyTotalTokens = {}; const allDates = new Set(); @@ -13135,11 +12359,6 @@ async function buildUsageAnalytics(days) { if (ts > newestTs) newestTs = ts; const day = getEntryDayKey(session, ts); if (day) allDates.add(day); - if (day) { - dailyInputTokens[day] = (dailyInputTokens[day] || 0) + numberOrZero(session.inputTokens); - dailyOutputTokens[day] = (dailyOutputTokens[day] || 0) + numberOrZero(session.outputTokens); - dailyTotalTokens[day] = (dailyTotalTokens[day] || 0) + numberOrZero(session.tokenCount); - } agentRuns += 1; const exec = String(session.executor || session.model || "unknown").trim() || "unknown"; @@ -13214,15 +12433,7 @@ async function buildUsageAnalytics(days) { const topSkillNames = topSkills.slice(0, 6).map((s) => s.name); const topMcpNames = topMcpTools.slice(0, 6).map((t) => t.name); - const trend = { - dates: sortedDates, - agents: {}, - skills: {}, - mcpTools: {}, - tokens: sortedDates.map((d) => dailyTotalTokens[d] || 0), - inputTokens: sortedDates.map((d) => dailyInputTokens[d] || 0), - outputTokens: sortedDates.map((d) => dailyOutputTokens[d] || 0), - }; + const trend = { dates: sortedDates, agents: {}, skills: {}, mcpTools: {} }; for (const name of topAgentNames) { trend.agents[name] = sortedDates.map((d) => dailyAgents[d]?.[name] || 0); } @@ -13233,17 +12444,10 @@ async function buildUsageAnalytics(days) { trend.mcpTools[name] = sortedDates.map((d) => 
dailyMcp[d]?.[name] || 0); } - const totalTokens = sessionWindow.reduce((sum, session) => sum + numberOrZero(session.tokenCount), 0); - const totalInputTokens = sessionWindow.reduce((sum, session) => sum + numberOrZero(session.inputTokens), 0); - const totalOutputTokens = sessionWindow.reduce((sum, session) => sum + numberOrZero(session.outputTokens), 0); - return { agentRuns, skillInvocations, mcpToolCalls, - totalTokens, - totalInputTokens, - totalOutputTokens, avgPerDay, lastActiveAt: newestTs < Infinity && newestTs > 0 ? new Date(newestTs).toISOString() : null, sinceAt: oldestTs < Infinity ? new Date(oldestTs).toISOString() : null, @@ -13260,18 +12464,7 @@ async function buildUsageAnalytics(days) { } function resolveAgentWorkLogDir() { - // When REPO_ROOT is not explicitly set, OR when repoRoot resolves to a - // workspace clone (e.g. .bosun/workspaces//bosun), the module-relative - // path is added first so we find the data written by task-executor - // (__dirname-relative) even when resolveRepoRoot() returns a workspace clone - // path instead of the module root. - const repoRootNorm = resolve(repoRoot).replace(/\\/g, "/"); - const isWorkspaceClone = repoRootNorm.includes("/.bosun/workspaces/"); - const useModuleRelative = !process.env.REPO_ROOT || isWorkspaceClone; const candidates = [ - ...(useModuleRelative - ? [resolve(__dirname, "..", ".cache", "agent-work-logs")] - : []), resolve(repoRoot, ".cache", "agent-work-logs"), // When repoRoot is the monorepo root, data lives under bosun/.cache resolve(repoRoot, "bosun", ".cache", "agent-work-logs"), @@ -13438,25 +12631,6 @@ async function readReplayableAgentRun(attemptId) { const first = events[0] || null; const last = events[events.length - 1] || null; const overview = buildReplayOverview(events); - const turns = []; - let turnIndex = 0; - for (const event of events) { - if (event?.type !== "usage") continue; - turnIndex += 1; - const usage = event?.data?.usage && typeof event.data.usage === "object" ? 
event.data.usage : {}; - const tokenCount = Number(usage.total_tokens ?? usage.totalTokens ?? event?.data?.tokenCount ?? 0) || 0; - const inputTokens = Number(usage.input_tokens ?? usage.inputTokens ?? 0) || 0; - const outputTokens = Number(usage.output_tokens ?? usage.outputTokens ?? 0) || 0; - const durationMs = Number(event?.data?.duration_ms ?? event?.data?.durationMs ?? 0) || 0; - turns.push({ - index: turnIndex, - timestamp: event.timestamp || null, - tokenCount, - inputTokens, - outputTokens, - durationMs, - }); - } return { attemptId: normalizedAttemptId, taskId: first?.taskId || last?.taskId || null, @@ -13469,7 +12643,6 @@ async function readReplayableAgentRun(attemptId) { : "in_progress", shortSteps: overview.shortSteps, totals: overview.totals, - turns, events, }; } @@ -13733,10 +12906,7 @@ async function resolveTaskLinkedWorktreePath(task, tracker = null) { const sessionTracker = tracker || getSessionTracker(); for (const sessionId of collectTaskLinkedSessionIds(task, sessionTracker)) { - const session = - sessionTracker?.getSessionById?.(sessionId) || - sessionTracker?.getSessionMessages?.(sessionId) || - sessionTracker?.getSession?.(sessionId); + const session = sessionTracker?.getSession?.(sessionId); const worktreePath = await resolveSessionWorktreePath(session); if (worktreePath && existsSync(worktreePath)) return worktreePath; } @@ -13758,17 +12928,13 @@ async function handleApi(req, res, url) { if (req.method === "OPTIONS") { res.writeHead(204, { "Access-Control-Allow-Origin": "*", - "Access-Control-Allow-Methods": "GET,POST,PUT,DELETE,OPTIONS", + "Access-Control-Allow-Methods": "GET,POST,OPTIONS", "Access-Control-Allow-Headers": "Content-Type,X-Telegram-InitData,X-Bosun-Fallback-Auth", }); res.end(); return; } - if (path.startsWith("/api/vault")) { - if (await handleVaultApi(req, res, path)) return; - } - if (path === "/api/auth/fallback/status" && req.method === "GET") { jsonResponse(res, 200, { ok: true, @@ -13807,45 +12973,6 @@ async 
function handleApi(req, res, url) { return; } - if (path === "/api/health") { - const payload = await getOrComputeCachedApiResponse("health", 2000, async () => ({ - ok: true, - uptime: process.uptime(), - wsClients: wsClients.size, - lanIp: getLocalLanIp(), - url: getTelegramUiUrl(), - })); - jsonResponse(res, 200, payload); - return; - } - - if (path === "/api/health-stats") { - const payload = await getOrComputeCachedApiResponse("health-stats", 30_000, async () => { - const SIX_HOURS_MS = 6 * 60 * 60 * 1000; - const cutoff = new Date(Date.now() - SIX_HOURS_MS).toISOString(); - let successRuns = 0; - let failedRuns = 0; - try { - const tasks = getAllInternalTasks(); - for (const task of tasks) { - for (const entry of (task.statusHistory || [])) { - if (entry.timestamp < cutoff) continue; - const normalizedStatus = String(entry.status || "").toLowerCase(); - if (normalizedStatus === "done") successRuns++; - else if (normalizedStatus === "error" || normalizedStatus === "failed" || normalizedStatus === "blocked") failedRuns++; - } - } - } catch { - // Task store not loaded or unavailable - } - const total = successRuns + failedRuns; - const failRate = total > 0 ? 
failedRuns / total : 0; - return { ok: true, successRuns, failedRuns, total, failRate, windowHours: 6 }; - }); - jsonResponse(res, 200, payload); - return; - } - const authResult = await requireAuth(req); if (!authResult?.ok) { jsonResponse(res, 401, { @@ -13955,40 +13082,9 @@ async function handleApi(req, res, url) { if (path === "/api/executor") { const executor = uiDeps.getInternalExecutor?.(); const mode = uiDeps.getExecutorMode?.() || "internal"; - const execStatus = executor?.getStatus?.() || null; - const workspaceContext = resolveWorkspaceContextFromRequest(url, { allowAll: false }) - || resolveActiveWorkspaceExecutionContext(); - /* Augment with active workflow run counts so Fleet Overview reflects - real system load even when no agent subprocess slots are occupied */ - let activeWorkflowRuns = 0; - let workflowRunDetails = []; - try { - const wfCtx = await getWorkflowRequestContext(url, { bootstrapTemplates: false }).catch(() => null); - if (wfCtx?.ok && wfCtx.engine) { - const runs = await Promise.resolve(wfCtx.engine.list?.() || []).catch(() => []); - const active = (Array.isArray(runs) ? runs : []).filter( - (r) => String(r?.status || "").toLowerCase() === "running" - ); - activeWorkflowRuns = active.length; - workflowRunDetails = active.slice(0, 20).map((r) => ({ - runId: r.id || r.runId, - workflowId: r.workflowId, - workflowName: r.workflowName || r.workflowId, - startedAt: r.startedAt, - activeNodeCount: r.activeNodeCount || 0, - })); - } - } catch { - // best-effort: executor data is still returned without workflow augmentation - } - const workspaceSummary = execStatus - ? buildWorkspaceExecutorSummary(execStatus, workspaceContext) - : null; jsonResponse(res, 200, { ok: true, - data: execStatus - ? 
{ ...execStatus, workspaceSummary, activeWorkflowRuns, workflowRunDetails } - : null, + data: executor?.getStatus?.() || null, mode, paused: executor?.isPaused?.() || false, }); @@ -14604,7 +13700,7 @@ async function handleApi(req, res, url) { const sprintId = resolveTaskSprintId(detailTask); const sprintDag = includeDag && sprintId ? await getSprintDagData(sprintId) : null; const globalDag = includeDag ? await getGlobalDagData() : null; - const blockedContext = await buildTaskBlockedContext(detailTask, { + const blockedContext = buildTaskBlockedContext(detailTask, { canStart, workflowRuns: mergedWorkflowRuns, workspaceDir: workspaceContext?.workspaceDir || repoRoot, @@ -16362,33 +15458,6 @@ async function handleApi(req, res, url) { ? body.repositories.filter((value) => typeof value === "string" && value.trim()) : []; const metadataFields = buildTaskMetadataPatch(body || {}); - const workspaceContext = resolveActiveWorkspaceExecutionContext(); - const guardrailsRootDir = String(workspaceContext?.workspaceDir || repoRoot).trim() || repoRoot; - const INPUTPolicy = loadGuardrailsPolicy(guardrailsRootDir); - const taskAssessment = assessInputQuality( - buildTaskGuardrailsInput(body, { - projectId, - workspace, - repository, - repositories, - tags, - metadataTopLevel: metadataFields.topLevel, - metadata: metadataFields.meta, - }), - INPUTPolicy.INPUT, - ); - const taskAssessmentPayload = taskAssessment.blocked && hasGuardrailsOverride(body || {}) - ? 
{ ...taskAssessment, overrideAccepted: true } - : taskAssessment; - if (taskAssessment.blocked && taskAssessmentPayload.overrideAccepted !== true) { - jsonResponse(res, 400, { - ok: false, - error: "Input blocked by INPUT guardrails", - code: "guardrails_INPUT_blocked", - assessment: taskAssessmentPayload, - }); - return; - } const taskData = { title: String(title).trim(), description: body?.description || "", @@ -16414,7 +15483,7 @@ async function handleApi(req, res, url) { }; const createdRaw = await adapter.createTask(projectId, taskData); const created = withTaskMetadataTopLevel(createdRaw); - jsonResponse(res, 200, { ok: true, data: created, assessment: taskAssessmentPayload }); + jsonResponse(res, 200, { ok: true, data: created }); broadcastUiEvent(["tasks", "overview"], "invalidate", { reason: "task-created", taskId: created?.id || null, @@ -17164,35 +16233,13 @@ async function handleApi(req, res, url) { } if (action === "enable") { const result = enableHook(rootDir, hookId); - if (result.success) { - broadcastUiEvent(["guardrails", "library", "overview"], "invalidate", { - reason: "hook-state-updated", - action, - hookId, - workspaceId: workspaceContext.workspaceId || "", - }); - } jsonResponse(res, result.success ? 200 : 400, { ok: result.success, ...result }); } else if (action === "disable") { const force = body?.force === true; const result = disableHook(rootDir, hookId, force); - if (result.success) { - broadcastUiEvent(["guardrails", "library", "overview"], "invalidate", { - reason: "hook-state-updated", - action, - hookId, - workspaceId: workspaceContext.workspaceId || "", - }); - } jsonResponse(res, result.success ? 
200 : 400, { ok: result.success, ...result }); } else if (action === "initialize") { const state = initializeHookState(rootDir); - broadcastUiEvent(["guardrails", "library", "overview"], "invalidate", { - reason: "hook-state-initialized", - action, - hookId, - workspaceId: workspaceContext.workspaceId || "", - }); jsonResponse(res, 200, { ok: true, data: state }); } else { jsonResponse(res, 400, { ok: false, error: `Unknown action: ${action}. Use enable, disable, or initialize.` }); @@ -17432,16 +16479,12 @@ async function handleApi(req, res, url) { // ── Workspace Management API ────────────────────────────────────────────── if (path === "/api/workspaces") { try { - const payload = await getOrComputeCachedApiResponse(`workspaces:${repoRoot}`, 3000, async () => { - const configDir = resolveUiConfigDir(); - // Auto-initialize workspaces from disk if config has none yet. - // This path can touch the filesystem repeatedly under UI polling. - const { workspaces: initialized } = initializeWorkspaces(configDir, { repoRoot }); - const workspaces = initialized.length > 0 ? initialized : listManagedWorkspaces(configDir, { repoRoot }); - const active = getActiveManagedWorkspace(configDir); - return { ok: true, data: workspaces, activeId: active?.id || null }; - }); - jsonResponse(res, 200, payload); + const configDir = resolveUiConfigDir(); + // Auto-initialize workspaces from disk if config has none yet + const { workspaces: initialized } = initializeWorkspaces(configDir, { repoRoot }); + const workspaces = initialized.length > 0 ? 
initialized : listManagedWorkspaces(configDir, { repoRoot }); + const active = getActiveManagedWorkspace(configDir); + jsonResponse(res, 200, { ok: true, data: workspaces, activeId: active?.id || null }); } catch (err) { jsonResponse(res, 500, { ok: false, error: err.message }); } @@ -17480,8 +16523,6 @@ async function handleApi(req, res, url) { return; } setActiveManagedWorkspace(configDir, wsId); - invalidateApiCache("workspaces:"); - invalidateApiCache("workflows:"); const active = getActiveManagedWorkspace(configDir); jsonResponse(res, 200, { ok: true, @@ -17503,22 +16544,20 @@ async function handleApi(req, res, url) { if (path === "/api/workspaces/active/repos") { try { - const payload = await getOrComputeCachedApiResponse(`workspaces:active-repos:${repoRoot}`, 3000, async () => { - const configDir = resolveUiConfigDir(); - const active = getActiveManagedWorkspace(configDir); - if (!active) { - return { ok: true, repos: [] }; - } - const repos = Array.isArray(active.repos) - ? active.repos.map((r) => ({ - name: r.name || r.path || "", - path: r.path || "", - primary: Boolean(r.primary), - })) - : []; - return { ok: true, repos }; - }); - jsonResponse(res, 200, payload); + const configDir = resolveUiConfigDir(); + const active = getActiveManagedWorkspace(configDir); + if (!active) { + jsonResponse(res, 200, { ok: true, repos: [] }); + return; + } + const repos = Array.isArray(active.repos) + ? 
active.repos.map((r) => ({ + name: r.name || r.path || "", + path: r.path || "", + primary: Boolean(r.primary), + })) + : []; + jsonResponse(res, 200, { ok: true, repos }); } catch (err) { jsonResponse(res, 500, { ok: false, error: err.message }); } @@ -17749,7 +16788,7 @@ async function handleApi(req, res, url) { try { const worktrees = listActiveWorktrees(repoRoot); const stats = await getWorktreeStats(repoRoot); - const recovery = await readUiWorktreeRecovery(); + const recovery = await readWorktreeRecoveryState(repoRoot); const recoveryBackfill = buildRecoveryBackfilledWorktrees(worktrees, recovery); jsonResponse(res, 200, { ok: true, @@ -18406,30 +17445,34 @@ if (path === "/api/agent-logs/context") { let gitStatus = ""; let diffStat = ""; try { - gitLog = (await execAsync("git log --oneline -5", { + gitLog = execSync("git log --oneline -5 2>&1", { cwd: wtPath, + encoding: "utf8", timeout: 10000, - })).trim(); + }).trim(); } catch { gitLog = ""; } try { - gitStatus = (await execAsync("git status --short", { + gitStatus = execSync("git status --short 2>&1", { cwd: wtPath, + encoding: "utf8", timeout: 10000, - })).trim(); + }).trim(); } catch { gitStatus = ""; } try { - const branch = (await execAsync("git branch --show-current", { + const branch = execSync("git branch --show-current 2>&1", { cwd: wtPath, + encoding: "utf8", timeout: 5000, - })).trim(); - diffStat = (await execAsync(`git diff --stat main...${branch}`, { + }).trim(); + diffStat = execSync(`git diff --stat main...${branch} 2>&1`, { cwd: wtPath, + encoding: "utf8", timeout: 10000, - })).trim(); + }).trim(); } catch { diffStat = ""; } @@ -18479,7 +17522,7 @@ if (path === "/api/agent-logs/context") { try { const executor = uiDeps.getInternalExecutor?.(); const status = executor?.getStatus?.() || {}; - const worktreeRecovery = await readUiWorktreeRecovery(); + const worktreeRecovery = await readWorktreeRecoveryState(repoRoot); const data = { executor: { mode: uiDeps.getExecutorMode?.() || "internal", 
@@ -18617,17 +17660,17 @@ if (path === "/api/agent-logs/context") { wtPath = resolve(worktreeDir, wtName); worktreeMatches.push(...matches); } - const runWtGit = async (args) => { + const runWtGit = (args) => { try { - return (await execAsync(`git ${args}`, { cwd: wtPath, timeout: 5000 })).trim(); + return execSync(`git ${args}`, { cwd: wtPath, encoding: "utf8", timeout: 5000 }).trim(); } catch { return ""; } }; - const gitLog = await runWtGit("log --oneline -10"); - const gitLogDetailed = await runWtGit("log --format=%h||%D||%s||%cr -10"); - const gitStatus = await runWtGit("status --porcelain"); - const gitBranch = await runWtGit("rev-parse --abbrev-ref HEAD"); - const gitDiffStat = await runWtGit("diff --stat"); - const gitAheadBehind = await runWtGit("rev-list --left-right --count HEAD...@{upstream} 2>/dev/null"); + const gitLog = runWtGit("log --oneline -10"); + const gitLogDetailed = runWtGit("log --format=%h||%D||%s||%cr -10"); + const gitStatus = runWtGit("status --porcelain"); + const gitBranch = runWtGit("rev-parse --abbrev-ref HEAD"); + const gitDiffStat = runWtGit("diff --stat"); + const gitAheadBehind = runWtGit("rev-list --left-right --count HEAD...@{upstream} 2>/dev/null"); const changedFiles = gitStatus ? 
gitStatus .split("\n") @@ -18778,11 +17821,12 @@ if (path === "/api/agent-logs/context") { jsonResponse(res, 400, { ok: false, error: "branch is required" }); return; } - const hasRef = async (ref) => { + const hasRef = (ref) => { try { - await execAsync(`git show-ref --verify --quiet ${ref}`, { + execSync(`git show-ref --verify --quiet ${ref}`, { cwd: repoRoot, timeout: 5000, + stdio: "ignore", }); return true; } catch { @@ -18790,10 +17834,10 @@ if (path === "/api/agent-logs/context") { } }; const baseRef = - ((await hasRef("refs/heads/main")) && "main") || - ((await hasRef("refs/remotes/origin/main")) && "origin/main") || - ((await hasRef("refs/heads/master")) && "master") || - ((await hasRef("refs/remotes/origin/master")) && "origin/master") || + (hasRef("refs/heads/main") && "main") || + (hasRef("refs/remotes/origin/main") && "origin/main") || + (hasRef("refs/heads/master") && "master") || + (hasRef("refs/remotes/origin/master") && "origin/master") || null; const diffRange = baseRef ? `${baseRef}...${safe}` : `${safe}~1..${safe}`; const commitsRaw = runGit(`log ${safe} --format=%h||%s||%cr -20`, 15000); @@ -19092,28 +18136,19 @@ if (path === "/api/agent-logs/context") { if (path === "/api/workflows") { try { - const payload = await getOrComputeCachedApiResponse(`workflows:list:${url.search}`, 3000, async () => { - const wfCtx = await getWorkflowRequestContext(url, { bootstrapTemplates: false }); - if (!wfCtx.ok) { - return { __error: true, status: wfCtx.status, body: { ok: false, error: wfCtx.error } }; - } - const engine = wfCtx.engine; - const all = (await engine.list()).filter((workflow) => !shouldHideGeneratedWorkflowFromList(workflow)); - return { - ok: true, - workflows: all.map((w) => ({ - id: w.id, name: w.name, description: w.description, category: w.category, - enabled: w.enabled !== false, - nodeCount: Number.isFinite(w.nodeCount) ? 
w.nodeCount : (w.nodes || []).length, - trigger: w.trigger || (w.nodes || [])[0]?.type || "manual", - })), - }; - }); - if (payload?.__error) { - jsonResponse(res, payload.status, payload.body); + const wfCtx = await getWorkflowRequestContext(url, { bootstrapTemplates: false }); + if (!wfCtx.ok) { + jsonResponse(res, wfCtx.status, { ok: false, error: wfCtx.error }); return; } - jsonResponse(res, 200, payload); + const engine = wfCtx.engine; + const all = engine.list().filter((workflow) => !shouldHideGeneratedWorkflowFromList(workflow)); + jsonResponse(res, 200, { ok: true, workflows: all.map(w => ({ + id: w.id, name: w.name, description: w.description, category: w.category, + enabled: w.enabled !== false, + nodeCount: Number.isFinite(w.nodeCount) ? w.nodeCount : (w.nodes || []).length, + trigger: w.trigger || (w.nodes || [])[0]?.type || "manual", + })) }); } catch (err) { jsonResponse(res, 500, { ok: false, error: err.message }); } @@ -19133,7 +18168,6 @@ if (path === "/api/agent-logs/context") { _wfTemplates.applyWorkflowTemplateState(body); } const saved = await engine.save(body); - invalidateApiCache("workflows:"); jsonResponse(res, 200, { ok: true, workflow: saved }); } catch (err) { jsonResponse(res, 500, { ok: false, error: err.message }); @@ -19152,7 +18186,6 @@ if (path === "/api/agent-logs/context") { const engine = wfCtx.engine; const workflowPayload = body?.workflow ?? 
body; const imported = await engine.import(workflowPayload); - invalidateApiCache("workflows:"); jsonResponse(res, 200, { ok: true, workflow: imported }); } catch (err) { jsonResponse(res, 500, { ok: false, error: err.message }); @@ -19163,12 +18196,7 @@ if (path === "/api/agent-logs/context") { // GET /api/workflows/concurrency — live concurrency stats for dashboard if (path === "/api/workflows/concurrency" && req.method === "GET") { try { - const wfCtx = await getWorkflowRequestContext(url, { bootstrapTemplates: false }); - if (!wfCtx.ok) { - jsonResponse(res, wfCtx.status, { ok: false, error: wfCtx.error }); - return; - } - const stats = await wfCtx.engine.getConcurrencyStats(); + const stats = engine.getConcurrencyStats(); jsonResponse(res, 200, { ok: true, ...stats }); } catch (err) { jsonResponse(res, 500, { ok: false, error: err.message }); @@ -19493,10 +18521,10 @@ if (path === "/api/agent-logs/context") { ? Math.min(rawLimit, 5000) : 20; const page = typeof engine.getRunHistoryPage === "function" - ? await engine.getRunHistoryPage(null, { offset, limit }) + ? engine.getRunHistoryPage(null, { offset, limit }) : { - runs: engine.getRunHistory ? await engine.getRunHistory(null, limit) : [], - total: engine.getRunHistory ? (await engine.getRunHistory(null)).length : 0, + runs: engine.getRunHistory ? engine.getRunHistory(null, limit) : [], + total: engine.getRunHistory ? engine.getRunHistory(null).length : 0, offset, limit, }; @@ -19542,7 +18570,7 @@ if (path === "/api/agent-logs/context") { } if (action === "copilot-context" && (req.method === "GET" || req.method === "POST")) { - const run = typeof engine.getRunDetail === "function" ? await engine.getRunDetail(runId) : null; + const run = typeof engine.getRunDetail === "function" ? 
engine.getRunDetail(runId) : null; if (!run) { jsonResponse(res, 404, { ok: false, error: "Workflow run not found" }); return; @@ -19555,14 +18583,14 @@ if (path === "/api/agent-logs/context") { requestBody?.nodeId || url.searchParams.get("nodeId") || "", ).trim(); const workflow = typeof engine.get === "function" - ? await engine.get(String(run?.workflowId || "").trim()) + ? engine.get(String(run?.workflowId || "").trim()) : null; const nodeForensics = nodeId && typeof engine.getNodeForensics === "function" - ? await engine.getNodeForensics(runId, nodeId) + ? engine.getNodeForensics(runId, nodeId) : null; const runForensics = typeof engine.getRunForensics === "function" - ? await engine.getRunForensics(runId) + ? engine.getRunForensics(runId) : null; const payload = buildRunCopilotContextPayload(run, { intent, @@ -19589,7 +18617,7 @@ if (path === "/api/agent-logs/context") { } const body = await readJsonBody(req); const reason = String(body?.reason || "Run cancellation requested from UI").trim() || "Run cancellation requested from UI"; - const result = await engine.cancelRun(runId, { reason }); + const result = engine.cancelRun(runId, { reason }); if (!result?.ok) { const statusCode = String(result?.error || "").includes("not found") ? 404 : 409; jsonResponse(res, statusCode, { @@ -19615,7 +18643,7 @@ if (path === "/api/agent-logs/context") { // If mode is omitted, returns available retry options so the UI can // present a choice to the user. if (action === "retry" && req.method === "POST") { - const run = engine.getRunDetail ? await engine.getRunDetail(runId) : null; + const run = engine.getRunDetail ? engine.getRunDetail(runId) : null; if (!run) { jsonResponse(res, 404, { ok: false, error: "Workflow run not found" }); return; @@ -19628,7 +18656,7 @@ if (path === "/api/agent-logs/context") { const mode = body?.mode; if (!mode) { const retryOptions = typeof engine.getRetryOptions === "function" - ? await engine.getRetryOptions(runId) + ? 
engine.getRetryOptions(runId) : null; if (retryOptions) { jsonResponse(res, 200, { @@ -19672,7 +18700,7 @@ if (path === "/api/agent-logs/context") { return; } const forensics = typeof engine.getNodeForensics === "function" - ? await engine.getNodeForensics(runId, nodeId) + ? engine.getNodeForensics(runId, nodeId) : null; if (!forensics) { jsonResponse(res, 404, { ok: false, error: "Node not found in run" }); @@ -19685,7 +18713,7 @@ if (path === "/api/agent-logs/context") { // ── GET /api/workflows/runs/:id/forensics — full run forensics ── if (action === "forensics" && req.method === "GET") { const forensics = typeof engine.getRunForensics === "function" - ? await engine.getRunForensics(runId) + ? engine.getRunForensics(runId) : null; if (!forensics) { jsonResponse(res, 404, { ok: false, error: "Run not found" }); @@ -19697,7 +18725,7 @@ if (path === "/api/agent-logs/context") { // ── GET /api/workflows/runs/:id/evaluate — run evaluation ─────── if (action === "evaluate" && req.method === "GET") { - const run = engine.getRunDetail ? await engine.getRunDetail(runId) : null; + const run = engine.getRunDetail ? engine.getRunDetail(runId) : null; if (!run) { jsonResponse(res, 404, { ok: false, error: "Workflow run not found" }); return; @@ -19715,7 +18743,7 @@ if (path === "/api/agent-logs/context") { jsonResponse(res, 501, { ok: false, error: "Snapshots not supported" }); return; } - const result = await engine.createRunSnapshot(runId); + const result = engine.createRunSnapshot(runId); if (!result) { jsonResponse(res, 404, { ok: false, error: "Run not found" }); return; @@ -19726,10 +18754,10 @@ if (path === "/api/agent-logs/context") { // ── GET /api/workflows/runs/:id/snapshots — list snapshots ────── if (action === "snapshots" && req.method === "GET") { - const run = engine.getRunDetail ? await engine.getRunDetail(runId) : null; + const run = engine.getRunDetail ? 
engine.getRunDetail(runId) : null; const workflowId = run?.workflowId || run?.detail?.data?._workflowId || null; const snapshots = typeof engine.listSnapshots === "function" - ? await engine.listSnapshots(workflowId) + ? engine.listSnapshots(workflowId) : []; jsonResponse(res, 200, { ok: true, snapshots }); return; @@ -19756,7 +18784,7 @@ if (path === "/api/agent-logs/context") { // ── POST /api/workflows/runs/:id/remediate — apply fix actions ── if (action === "remediate" && req.method === "POST") { - const run = engine.getRunDetail ? await engine.getRunDetail(runId) : null; + const run = engine.getRunDetail ? engine.getRunDetail(runId) : null; if (!run) { jsonResponse(res, 404, { ok: false, error: "Workflow run not found" }); return; @@ -19873,42 +18901,39 @@ if (path === "/api/agent-logs/context") { } if (action === "runs") { - const payload = await getOrComputeCachedApiResponse(`workflows:runs:${workflowId}:${url.search}`, 2000, async () => { - const rawOffset = Number(url.searchParams.get("offset")); - const rawLimit = Number(url.searchParams.get("limit")); - const offset = Number.isFinite(rawOffset) && rawOffset > 0 - ? Math.max(0, Math.floor(rawOffset)) - : 0; - const limit = Number.isFinite(rawLimit) && rawLimit > 0 - ? Math.min(rawLimit, 5000) - : 20; - const page = typeof engine.getRunHistoryPage === "function" - ? engine.getRunHistoryPage(workflowId, { offset, limit }) - : { - runs: engine.getRunHistory ? engine.getRunHistory(workflowId, limit) : [], - total: engine.getRunHistory ? engine.getRunHistory(workflowId).length : 0, - offset, - limit, - }; - const runs = Array.isArray(page?.runs) ? page.runs : []; - const total = Number.isFinite(Number(page?.total)) ? Number(page.total) : runs.length; - const nextOffset = Number.isFinite(Number(page?.nextOffset)) - ? Number(page.nextOffset) - : (offset + runs.length < total ? 
offset + runs.length : null); - return { - ok: true, - runs, - pagination: { - total, + const rawOffset = Number(url.searchParams.get("offset")); + const rawLimit = Number(url.searchParams.get("limit")); + const offset = Number.isFinite(rawOffset) && rawOffset > 0 + ? Math.max(0, Math.floor(rawOffset)) + : 0; + const limit = Number.isFinite(rawLimit) && rawLimit > 0 + ? Math.min(rawLimit, 5000) + : 20; + const page = typeof engine.getRunHistoryPage === "function" + ? engine.getRunHistoryPage(workflowId, { offset, limit }) + : { + runs: engine.getRunHistory ? engine.getRunHistory(workflowId, limit) : [], + total: engine.getRunHistory ? engine.getRunHistory(workflowId).length : 0, offset, limit, - count: runs.length, - hasMore: page?.hasMore === true || (nextOffset != null && nextOffset < total), - nextOffset, - }, - }; + }; + const runs = Array.isArray(page?.runs) ? page.runs : []; + const total = Number.isFinite(Number(page?.total)) ? Number(page.total) : runs.length; + const nextOffset = Number.isFinite(Number(page?.nextOffset)) + ? Number(page.nextOffset) + : (offset + runs.length < total ? 
offset + runs.length : null); + jsonResponse(res, 200, { + ok: true, + runs, + pagination: { + total, + offset, + limit, + count: runs.length, + hasMore: page?.hasMore === true || (nextOffset != null && nextOffset < total), + nextOffset, + }, }); - jsonResponse(res, 200, payload); return; } @@ -20136,18 +19161,9 @@ if (path === "/api/agent-logs/context") { } // GET — return full workflow definition - const payload = await getOrComputeCachedApiResponse(`workflows:detail:${workflowId}:${url.search}`, 3000, async () => { - const wf = engine.get(workflowId); - if (!wf) { - return { __error: true, status: 404, body: { ok: false, error: "Workflow not found" } }; - } - return { ok: true, workflow: wf }; - }); - if (payload?.__error) { - jsonResponse(res, payload.status, payload.body); - return; - } - jsonResponse(res, 200, payload); + const wf = engine.get(workflowId); + if (!wf) { jsonResponse(res, 404, { ok: false, error: "Workflow not found" }); return; } + jsonResponse(res, 200, { ok: true, workflow: wf }); } catch (err) { jsonResponse(res, 500, { ok: false, error: err.message }); } @@ -20521,24 +19537,6 @@ if (path === "/api/agent-logs/context") { } const mf = await import("../workflow/manual-flows.mjs"); const ctx = resolveActiveWorkspaceExecutionContext(); - const template = mf.getFlowTemplate(templateId, ctx.workspaceDir); - const INPUTPolicy = loadGuardrailsPolicy(ctx.workspaceDir || repoRoot); - const flowAssessment = assessInputQuality( - buildManualFlowGuardrailsInput(template, templateId, formValues || {}, executionContext || {}), - INPUTPolicy.INPUT, - ); - const flowAssessmentPayload = flowAssessment.blocked && hasGuardrailsOverride(body || {}) - ? 
{ ...flowAssessment, overrideAccepted: true } - : flowAssessment; - if (flowAssessment.blocked && flowAssessmentPayload.overrideAccepted !== true) { - jsonResponse(res, 400, { - ok: false, - error: "Input blocked by INPUT guardrails", - code: "guardrails_INPUT_blocked", - assessment: flowAssessmentPayload, - }); - return; - } const wfCtx = await getWorkflowRequestContext(url, { bootstrapTemplates: false }); const repository = String( executionContext?.repository || @@ -20575,7 +19573,7 @@ if (path === "/api/agent-logs/context") { }, }; const run = await mf.executeFlow(templateId, formValues || {}, ctx.workspaceDir, flowContext); - jsonResponse(res, 200, { ok: true, run, assessment: flowAssessmentPayload }); + jsonResponse(res, 200, { ok: true, run }); } catch (err) { jsonResponse(res, 500, { ok: false, error: err.message }); } @@ -20662,142 +19660,6 @@ if (path === "/api/agent-logs/context") { return; } - if (path === "/api/guardrails" && req.method === "GET") { - try { - const snapshot = buildGuardrailsSnapshot(); - jsonResponse(res, 200, { ok: true, snapshot }); - } catch (err) { - jsonResponse(res, 500, { ok: false, error: err.message }); - } - return; - } - - if (path === "/api/guardrails/policy" && req.method === "POST") { - try { - const body = await readJsonBody(req); - const INPUTPatch = body?.INPUT && typeof body.INPUT === "object" ? body.INPUT : null; - const pushPatch = body?.push && typeof body.push === "object" ? body.push : null; - const directPatch = body && typeof body === "object" && !Array.isArray(body) - ? body - : null; - const nextINPUTPatch = INPUTPatch || (directPatch && !Object.prototype.hasOwnProperty.call(directPatch, "push") ? 
directPatch : null); - if (!nextINPUTPatch && !pushPatch) { - jsonResponse(res, 400, { ok: false, error: "INPUT or push policy object is required" }); - return; - } - - const workspaceContext = resolveActiveWorkspaceExecutionContext(); - const workspaceDir = String(workspaceContext?.workspaceDir || repoRoot).trim() || repoRoot; - const currentPolicy = ensureGuardrailsPolicy(workspaceDir); - const nextPolicy = saveGuardrailsPolicy(workspaceDir, { - ...currentPolicy, - INPUT: nextINPUTPatch - ? { - ...currentPolicy.INPUT, - ...nextINPUTPatch, - } - : currentPolicy.INPUT, - push: pushPatch - ? { - ...currentPolicy.push, - ...pushPatch, - } - : currentPolicy.push, - }); - const snapshot = buildGuardrailsSnapshot(); - broadcastUiEvent(["guardrails", "settings", "overview"], "invalidate", { - reason: "guardrails-policy-updated", - }); - jsonResponse(res, 200, { - ok: true, - INPUT: { - policyPath: getGuardrailsPolicyPath(workspaceDir), - policy: nextPolicy.INPUT, - }, - push: { - policyPath: getGuardrailsPolicyPath(workspaceDir), - policy: nextPolicy.push, - }, - snapshot, - }); - } catch (err) { - jsonResponse(res, 500, { ok: false, error: err.message }); - } - return; - } - - if (path === "/api/guardrails/runtime" && req.method === "POST") { - try { - const body = await readJsonBody(req); - const runtimePatch = body?.runtime && typeof body.runtime === "object" - ? body.runtime - : body && typeof body === "object" - ? 
body - : null; - const preflightProvided = runtimePatch && hasOwn(runtimePatch, "preflightEnabled"); - const requireReviewProvided = runtimePatch && hasOwn(runtimePatch, "requireReview"); - if (!preflightProvided && !requireReviewProvided) { - jsonResponse(res, 400, { - ok: false, - error: "preflightEnabled or requireReview must be provided", - }); - return; - } - - let configPath = resolveConfigPath(); - if (preflightProvided) { - const { configPath: nextConfigPath, configData } = readConfigDocument(); - configPath = nextConfigPath; - configData.preflightEnabled = parseBooleanLike(runtimePatch.preflightEnabled, true); - writeFileSync(configPath, JSON.stringify(configData, null, 2) + "\n", "utf8"); - } - - const envPath = resolve(resolveUiConfigDir(), ".env"); - if (requireReviewProvided) { - const requireReview = parseBooleanLike(runtimePatch.requireReview, true); - process.env.BOSUN_FLOW_REQUIRE_REVIEW = requireReview ? "true" : "false"; - updateEnvFile({ BOSUN_FLOW_REQUIRE_REVIEW: process.env.BOSUN_FLOW_REQUIRE_REVIEW }); - } - - const snapshot = buildGuardrailsSnapshot(); - broadcastUiEvent(["guardrails", "settings", "overview"], "invalidate", { - reason: "guardrails-runtime-updated", - }); - jsonResponse(res, 200, { - ok: true, - configPath, - envPath, - runtime: snapshot.runtime, - snapshot, - }); - } catch (err) { - jsonResponse(res, 500, { ok: false, error: err.message }); - } - return; - } - - if (path === "/api/guardrails/assess" && req.method === "POST") { - try { - const body = await readJsonBody(req); - const workspaceContext = resolveActiveWorkspaceExecutionContext(); - const workspaceDir = String(workspaceContext?.workspaceDir || repoRoot).trim() || repoRoot; - const policy = ensureGuardrailsPolicy(workspaceDir); - const assessmentInput = body?.input ?? body?.payload ?? body?.assessmentInput ?? 
body; - const assessment = assessInputQuality(assessmentInput, policy.INPUT); - jsonResponse(res, 200, { - ok: true, - assessment, - INPUT: { - policyPath: getGuardrailsPolicyPath(workspaceDir), - policy: policy.INPUT, - }, - }); - } catch (err) { - jsonResponse(res, 500, { ok: false, error: err.message }); - } - return; - } - if (path === "/api/health-stats") { const SIX_HOURS_MS = 6 * 60 * 60 * 1000; const cutoff = new Date(Date.now() - SIX_HOURS_MS).toISOString(); @@ -20819,6 +19681,7 @@ if (path === "/api/agent-logs/context") { jsonResponse(res, 200, { ok: true, successRuns, failedRuns, total, failRate, windowHours: 6 }); return; } + if (path === "/api/config") { const regionEnv = (process.env.EXECUTOR_REGIONS || "").trim(); const regions = regionEnv ? regionEnv.split(",").map((r) => r.trim()).filter(Boolean) : ["auto"]; @@ -20857,161 +19720,6 @@ if (path === "/api/agent-logs/context") { return; } - // ── /api/env/detect — auto-detect project type from a repo path ────────────── - if (path === "/api/env/detect" && req.method === "POST") { - try { - const body = await readJsonBody(req); - const targetPath = body?.repoPath ? resolve(body.repoPath) : repoRoot; - const [{ detectProjectStack }, { detectEnvironmentTemplate, templateToRepoEnvironment }] = await Promise.all([ - import("../workflow/project-detection.mjs"), - import("../workspace/env-templates.mjs"), - ]); - const stackResult = detectProjectStack(targetPath); - const templateResult = detectEnvironmentTemplate(targetPath); - const environment = templateResult.template - ? templateToRepoEnvironment(templateResult.template) - : null; - jsonResponse(res, 200, { - ok: true, - stack: stackResult.primary ? 
{ - id: stackResult.primary.id, - label: stackResult.primary.label, - packageManager: stackResult.primary.packageManager, - frameworks: stackResult.primary.frameworks, - commands: stackResult.primary.commands, - } : null, - allStacks: stackResult.stacks.map((s) => ({ id: s.id, label: s.label, packageManager: s.packageManager })), - template: templateResult.template ? { - id: templateResult.template.id, - label: templateResult.template.label, - group: templateResult.template.group, - icon: templateResult.template.icon, - } : null, - confidence: templateResult.confidence, - environment, - }); - } catch (err) { - jsonResponse(res, 500, { ok: false, error: err.message }); - } - return; - } - - // ── /api/repos/environment — read per-repo environment config ───────────── - if (path === "/api/repos/environment" && req.method === "GET") { - try { - const repoName = searchParams.get("name") || searchParams.get("repo"); - const { configData } = readConfigDocument(); - const repos = Array.isArray(configData?.repositories) - ? configData.repositories - : Array.isArray(configData?.repositories?.items) - ? 
configData.repositories.items - : []; - - if (!repoName) { - // Return environment config for all repos - const result = repos.map((r) => ({ - name: r.name || r.id || r.slug, - slug: r.slug, - environment: r.environment || null, - })); - jsonResponse(res, 200, { ok: true, repos: result }); - return; - } - - const repo = repos.find( - (r) => r.name === repoName || r.id === repoName || r.slug === repoName || r.slug?.endsWith("/" + repoName), - ); - if (!repo) { - jsonResponse(res, 404, { ok: false, error: `Repository '${repoName}' not found in config` }); - return; - } - jsonResponse(res, 200, { ok: true, name: repo.name, slug: repo.slug, environment: repo.environment || null }); - } catch (err) { - jsonResponse(res, 500, { ok: false, error: err.message }); - } - return; - } - - // ── /api/repos/environment — save per-repo environment config ───────────── - if (path === "/api/repos/environment" && req.method === "POST") { - try { - const body = await readJsonBody(req); - const { name: repoName, slug: repoSlug, environment } = body || {}; - const identifier = repoName || repoSlug; - if (!identifier) { - jsonResponse(res, 400, { ok: false, error: "name or slug is required" }); - return; - } - if (!environment || typeof environment !== "object") { - jsonResponse(res, 400, { ok: false, error: "environment object is required" }); - return; - } - const { configPath, configData } = readConfigDocument(); - let repos = Array.isArray(configData?.repositories) - ? configData.repositories - : Array.isArray(configData?.repositories?.items) - ? 
configData.repositories.items - : []; - - const idx = repos.findIndex( - (r) => r.name === identifier || r.id === identifier || r.slug === identifier || r.slug?.endsWith("/" + identifier), - ); - if (idx < 0) { - jsonResponse(res, 404, { ok: false, error: `Repository '${identifier}' not found in config` }); - return; - } - - // Sanitize environment config — only allow known fields - const sanitized = {}; - const allowed = ["template", "installCommands", "startCommand", "buildCommand", "testCommand", "lintCommand", "debugCommand", "worktreeSetupScript", "sharedPaths"]; - for (const key of allowed) { - if (key in environment) sanitized[key] = environment[key]; - } - - if (Array.isArray(configData.repositories)) { - configData.repositories[idx] = { ...configData.repositories[idx], environment: sanitized }; - } else { - configData.repositories.items[idx] = { ...configData.repositories.items[idx], environment: sanitized }; - } - - writeFileSync(configPath, JSON.stringify(configData, null, 2) + "\n", "utf8"); - broadcastUiEvent(["settings", "overview"], "invalidate", { reason: "repo-environment-updated", repo: identifier }); - jsonResponse(res, 200, { ok: true, name: identifier, environment: sanitized }); - } catch (err) { - jsonResponse(res, 500, { ok: false, error: err.message }); - } - return; - } - - // ── /api/env/templates — list all environment templates ────────────────── - if (path === "/api/env/templates" && req.method === "GET") { - try { - const { ENV_TEMPLATES, getTemplatesByGroup } = await import("../workspace/env-templates.mjs"); - jsonResponse(res, 200, { - ok: true, - templates: ENV_TEMPLATES.map((t) => ({ - id: t.id, - label: t.label, - description: t.description, - icon: t.icon, - group: t.group, - installCommands: t.installCommands, - startCommand: t.startCommand, - buildCommand: t.buildCommand, - testCommand: t.testCommand, - lintCommand: t.lintCommand, - debugCommand: t.debugCommand, - worktreeSetupCommands: t.worktreeSetupCommands, - sharedPaths: 
t.sharedPaths, - })), - groups: Object.keys(getTemplatesByGroup()), - }); - } catch (err) { - jsonResponse(res, 500, { ok: false, error: err.message }); - } - return; - } - if (path === "/api/config/update") { try { const body = await readJsonBody(req); @@ -22070,17 +20778,14 @@ if (path === "/api/agent-logs/context") { if (!session) return null; return sessionMatchesWorkspaceContext(session, workspaceContext) ? session : null; }; - const getScopedSessionRecord = ({ includeMessages = false } = {}) => { - const session = includeMessages - ? tracker.getSessionMessages(sessionId) - : (tracker.getSessionById(sessionId) || tracker.getSessionMessages(sessionId)); - if (!session) return null; - return sessionMatchesWorkspaceContext(session, workspaceContext) ? session : null; - }; if (!action && req.method === "GET") { try { - const session = getScopedSessionRecord({ includeMessages: true }); + if (!getScopedSession()) { + jsonResponse(res, 404, { ok: false, error: "Session not found" }); + return; + } + const session = tracker.getSessionMessages(sessionId); if (!session) { jsonResponse(res, 404, { ok: false, error: "Session not found" }); return; @@ -22507,7 +21212,7 @@ if (path === "/api/agent-logs/context") { if (action === "diff" && req.method === "GET") { try { - const session = getScopedSessionRecord(); + const session = getScopedSession(); if (!session) { jsonResponse(res, 200, { ok: true, @@ -23864,186 +22569,6 @@ if (path === "/api/agent-logs/context") { jsonResponse(res, 404, { ok: false, error: "Unknown API endpoint" }); } -// ─── Vault singleton ──────────────────────────────────────────────────────────── -let _vault = null; -function getVault() { - if (!_vault) _vault = new VaultStore(); - return _vault; -} - -async function ensureVaultOpen() { - const v = getVault(); - if (v.isUnlocked()) return v; - if (!v.isInitialized()) return null; - try { - const { key } = keychainGetOrCreate(); - v.open(key); - return v; - } catch { - return null; - } -} - -// ─── 
/api/vault/* routes ──────────────────────────────────────────────────────── -async function handleVaultApi(req, res, path) { - // GET /api/vault/status - if (path === "/api/vault/status" && req.method === "GET") { - const v = getVault(); - let status = v.status(); - if (!status.unlocked && v.isInitialized()) { - try { - const { key } = keychainGetOrCreate(); - v.open(key); - status = v.status(); - } catch { /* stays locked */ } - } - jsonResponse(res, 200, { ok: true, data: status }); - return true; - } - - // POST /api/vault/init - if (path === "/api/vault/init" && req.method === "POST") { - const v = getVault(); - if (v.isInitialized()) { - jsonResponse(res, 409, { ok: false, error: "Vault already initialized." }); - return true; - } - try { - const { key } = keychainGetOrCreate(); - v.init(key); - jsonResponse(res, 200, { ok: true, data: { initialized: true } }); - } catch (err) { - jsonResponse(res, 500, { ok: false, error: err.message }); - } - return true; - } - - // GET /api/vault/integrations - if (path === "/api/vault/integrations" && req.method === "GET") { - jsonResponse(res, 200, { ok: true, data: INTEGRATIONS }); - return true; - } - - // GET /api/vault/secrets - if (path === "/api/vault/secrets" && req.method === "GET") { - const v = await ensureVaultOpen(); - if (!v) { jsonResponse(res, 503, { ok: false, error: "Vault locked or not initialized." }); return true; } - jsonResponse(res, 200, { ok: true, data: v.listSecrets() }); - return true; - } - - // POST /api/vault/secrets - if (path === "/api/vault/secrets" && req.method === "POST") { - const v = await ensureVaultOpen(); - if (!v) { jsonResponse(res, 503, { ok: false, error: "Vault locked or not initialized." 
}); return true; } - try { - const body = await readJsonBody(req); - const id = v.createSecret(body); - jsonResponse(res, 201, { ok: true, data: { id, ...v.getSecret(id) } }); - } catch (err) { - jsonResponse(res, 400, { ok: false, error: err.message }); - } - return true; - } - - // GET /api/vault/secrets/:id - const secretIdMatch = path.match(/^\/api\/vault\/secrets\/([^/]+)$/); - if (secretIdMatch && req.method === "GET") { - const v = await ensureVaultOpen(); - if (!v) { jsonResponse(res, 503, { ok: false, error: "Vault locked or not initialized." }); return true; } - try { - jsonResponse(res, 200, { ok: true, data: v.getSecret(secretIdMatch[1]) }); - } catch (err) { - jsonResponse(res, 404, { ok: false, error: err.message }); - } - return true; - } - - // PUT /api/vault/secrets/:id - if (secretIdMatch && req.method === "PUT") { - const v = await ensureVaultOpen(); - if (!v) { jsonResponse(res, 503, { ok: false, error: "Vault locked or not initialized." }); return true; } - try { - const body = await readJsonBody(req); - v.updateSecret(secretIdMatch[1], body); - jsonResponse(res, 200, { ok: true, data: v.getSecret(secretIdMatch[1]) }); - } catch (err) { - jsonResponse(res, 404, { ok: false, error: err.message }); - } - return true; - } - - // DELETE /api/vault/secrets/:id - if (secretIdMatch && req.method === "DELETE") { - const v = await ensureVaultOpen(); - if (!v) { jsonResponse(res, 503, { ok: false, error: "Vault locked or not initialized." }); return true; } - try { - v.deleteSecret(secretIdMatch[1]); - jsonResponse(res, 200, { ok: true }); - } catch (err) { - jsonResponse(res, 404, { ok: false, error: err.message }); - } - return true; - } - - // PUT /api/vault/secrets/:id/permissions - const permMatch = path.match(/^\/api\/vault\/secrets\/([^/]+)\/permissions$/); - if (permMatch && req.method === "PUT") { - const v = await ensureVaultOpen(); - if (!v) { jsonResponse(res, 503, { ok: false, error: "Vault locked or not initialized." 
}); return true; } - try { - const body = await readJsonBody(req); - v.setPermissions(permMatch[1], body); - jsonResponse(res, 200, { ok: true, data: v.getSecret(permMatch[1]).permissions }); - } catch (err) { - jsonResponse(res, 404, { ok: false, error: err.message }); - } - return true; - } - - // GET /api/vault/env - if (path === "/api/vault/env" && req.method === "GET") { - const v = await ensureVaultOpen(); - if (!v) { jsonResponse(res, 503, { ok: false, error: "Vault locked or not initialized." }); return true; } - const keys = v.listEnvKeys(); - jsonResponse(res, 200, { ok: true, data: { keys } }); - return true; - } - - // POST /api/vault/env - if (path === "/api/vault/env" && req.method === "POST") { - const v = await ensureVaultOpen(); - if (!v) { jsonResponse(res, 503, { ok: false, error: "Vault locked or not initialized." }); return true; } - try { - const body = await readJsonBody(req); - if (!body?.key || typeof body.key !== "string") { - jsonResponse(res, 400, { ok: false, error: "key is required" }); return true; - } - v.setEnv(body.key, String(body.value ?? "")); - jsonResponse(res, 200, { ok: true, data: { key: body.key } }); - } catch (err) { - jsonResponse(res, 400, { ok: false, error: err.message }); - } - return true; - } - - // DELETE /api/vault/env/:key - const envKeyMatch = path.match(/^\/api\/vault\/env\/([^/]+)$/); - if (envKeyMatch && req.method === "DELETE") { - const v = await ensureVaultOpen(); - if (!v) { jsonResponse(res, 503, { ok: false, error: "Vault locked or not initialized." 
}); return true; } - try { - v.deleteEnv(decodeURIComponent(envKeyMatch[1])); - jsonResponse(res, 200, { ok: true }); - } catch (err) { - jsonResponse(res, 400, { ok: false, error: err.message }); - } - return true; - } - - return false; // not handled -} - async function handleStatic(req, res, url) { if (tryLocalSessionBootstrap(req, res, url)) { return; @@ -24160,9 +22685,7 @@ export async function startTelegramUiServer(options = {}) { const isTestRun = Boolean(process.env.VITEST) || process.env.NODE_ENV === "test" || - Boolean(process.env.JEST_WORKER_ID) || - Boolean(process.env.NODE_TEST_CONTEXT) || - process.execArgv.includes("--test"); + Boolean(process.env.JEST_WORKER_ID); if (isTestRun && typeof taskStoreModule?.configureTaskStore === "function") { const cacheDir = sandbox?.cacheDir || resolve(repoRoot, ".bosun", ".cache"); const isolatedStorePath = resolve( @@ -24173,16 +22696,13 @@ export async function startTelegramUiServer(options = {}) { } const skipInstanceLock = options.skipInstanceLock === true || - (options.skipInstanceLock !== false && ( - process.env.BOSUN_UI_SKIP_INSTANCE_LOCK === "1" || - isTestRun - )); + process.env.BOSUN_UI_SKIP_INSTANCE_LOCK === "1" || + isTestRun; const allowEphemeralPort = options.allowEphemeralPort === true || process.env.BOSUN_UI_ALLOW_EPHEMERAL_PORT === "1" || isTestRun; - const persistedPortRecord = readLastUiPortRecord(); - const persistedPort = persistedPortRecord?.port || 0; + const persistedPort = readLastUiPort(); const shouldReusePersistedPort = options.port == null && configuredPort === 0 && @@ -24226,15 +22746,15 @@ export async function startTelegramUiServer(options = {}) { ); if (!skipInstanceLock) { - const lockResult = await tryAcquireUiInstanceLock({ - preferredPort: port, - staleGraceMs: options.instanceLockStaleGraceMs, - probeTimeoutMs: options.instanceLockProbeTimeoutMs, - }); + const lockResult = tryAcquireUiInstanceLock({ preferredPort: port }); if (!lockResult.ok) { const existing = 
lockResult.existing || {}; + const existingTarget = existing.url + || (existing.port + ? `${existing.protocol || "http"}://${existing.host || "127.0.0.1"}:${existing.port}` + : "unknown"); console.warn( - `[telegram-ui] duplicate runtime detected (pid=${existing.pid}) — skipping secondary UI server start (${describeUiInstanceTarget(existing)})`, + `[telegram-ui] duplicate runtime detected (pid=${existing.pid}) — skipping secondary UI server start (${existingTarget})`, ); return null; } @@ -24245,7 +22765,6 @@ export async function startTelegramUiServer(options = {}) { if (!isTlsDisabled()) { tlsOpts = ensureSelfSignedCert(); } - let usedFallbackPort = false; const requestHandler = async (req, res) => { const url = new URL( @@ -24902,7 +23421,6 @@ export async function startTelegramUiServer(options = {}) { listenPort > 0; if (canRetryPortIncrement) { const nextPort = listenPort + 1; - usedFallbackPort = true; console.warn( `[telegram-ui] port ${listenPort} in use; retrying on ${nextPort} (attempt ${attempt + 1}/${maxPortFallbackAttempts})`, ); @@ -24914,7 +23432,6 @@ export async function startTelegramUiServer(options = {}) { const canRetryWithEphemeral = allowEphemeralPort && listenPort > 0 && (code === "EADDRINUSE" || code === "EACCES"); if (!canRetryWithEphemeral) throw err; - usedFallbackPort = true; console.warn( `[telegram-ui] failed to bind ${host}:${listenPort} (${code || "unknown"}); retrying with ephemeral port`, ); @@ -24923,9 +23440,6 @@ export async function startTelegramUiServer(options = {}) { } } } catch (err) { - if (shouldReusePersistedPort) { - clearLastUiPort(); - } releaseUiInstanceLock(); throw err; } @@ -24955,8 +23469,6 @@ export async function startTelegramUiServer(options = {}) { || normalized === "localhost" || normalized === "::1"; }; - const autoOpenSuppressedForFallbackPort = - usedFallbackPort || (port > 0 && actualPort !== port); const protocol = uiServerTls ? 
"https" : publicHost && !isLocalOrPrivateHost(publicHost) @@ -24971,7 +23483,7 @@ export async function startTelegramUiServer(options = {}) { url: uiServerUrl, startedAt: Date.now(), }); - persistLastUiPort(actualPort, { host, protocol, url: uiServerUrl }); + persistLastUiPort(actualPort); setComponentStatus("server", "running"); console.log(`[telegram-ui] server listening on ${uiServerUrl}`); if (uiServerTls) { @@ -25022,11 +23534,7 @@ export async function startTelegramUiServer(options = {}) { // - skip during Vitest / Jest test runs (avoids opening 20+ tabs during `npm test`) // - only open ONCE per process (singleton guard — prevents loops on server restart) const isTestRunRuntime = - process.env.VITEST || - process.env.NODE_ENV === "test" || - process.env.JEST_WORKER_ID || - process.env.NODE_TEST_CONTEXT || - process.execArgv.includes("--test"); + process.env.VITEST || process.env.NODE_ENV === "test" || process.env.JEST_WORKER_ID; const restartReason = String( options.restartReason || process.env.BOSUN_MONITOR_RESTART_REASON || "", ).trim(); @@ -25036,21 +23544,11 @@ export async function startTelegramUiServer(options = {}) { `[telegram-ui] auto-open suppressed during restart (${restartReason})`, ); } - if ( - autoOpenEnabled && - !options.skipAutoOpen && - autoOpenSuppressedForFallbackPort - ) { - console.log( - `[telegram-ui] auto-open suppressed because requested port ${port} was unavailable and UI bound ${actualPort} instead`, - ); - } if ( autoOpenEnabled && process.env.BOSUN_DESKTOP !== "1" && !options.skipAutoOpen && !suppressAutoOpenForRestart && - !autoOpenSuppressedForFallbackPort && !_browserOpened && !isTestRunRuntime && shouldAutoOpenBrowserNow() @@ -25163,10 +23661,12 @@ export function stopTelegramUiServer() { /* best effort */ } uiServer = null; - uiServerUrl = null; uiServerTls = false; resetProjectSyncWebhookMetrics(); releaseUiInstanceLock(); } export { getLocalLanIp }; + + + diff --git a/server/workflow-engine-worker.mjs 
b/server/workflow-engine-worker.mjs deleted file mode 100644 index 396b7049f..000000000 --- a/server/workflow-engine-worker.mjs +++ /dev/null @@ -1,258 +0,0 @@ -/** - * workflow-engine-worker.mjs - * - * Worker thread that hosts the Bosun workflow engine, completely isolated from - * the UI server's HTTP / WebSocket event loop. - * - * Protocol (parentPort messages): - * - * Parent → Worker: - * { type: "init", workerData: { repoRoot, workflowDir, runsDir } } - * { type: "call", callId, method, args } — proxied engine method call - * { type: "svc-res", callId, result?, error? } — response to a service call - * - * Worker → Parent: - * { type: "ready" } — engine fully initialised - * { type: "result", callId, result } — successful engine call - * { type: "error", callId, error, stack? } — failed engine call - * { type: "event", eventName, payload } — forwarded engine event - * { type: "svc-call",callId, method, args } — request main-thread service - */ - -import { parentPort, workerData } from "node:worker_threads"; -import { randomUUID } from "node:crypto"; -import { resolve, dirname } from "node:path"; -import { fileURLToPath, pathToFileURL } from "node:url"; - -const __dirname = dirname(fileURLToPath(import.meta.url)); -const repoRoot = workerData?.repoRoot || resolve(__dirname, ".."); - -// ── Pending service calls (worker awaiting main-thread response) ────────────── -const pendingSvcCalls = new Map(); - -function callMainService(method, args) { - return new Promise((resolve, reject) => { - const callId = randomUUID(); - pendingSvcCalls.set(callId, { resolve, reject }); - parentPort.postMessage({ type: "svc-call", callId, method, args }); - }); -} - -// ── Load workflow modules ────────────────────────────────────────────────────── -let engine = null; - -async function initEngine(cfg = {}) { - const base = pathToFileURL(repoRoot + "/").href; - - const [wfEngineMod, wfNodesMod, wfTemplatesMod] = await Promise.all([ - import(new 
URL("./workflow/workflow-engine.mjs", base).href), - import(new URL("./workflow/workflow-nodes.mjs", base).href), - import(new URL("./workflow/workflow-templates.mjs", base).href), - ]); - - if (typeof wfNodesMod?.ensureWorkflowNodeTypesLoaded === "function") { - await wfNodesMod.ensureWorkflowNodeTypesLoaded({ repoRoot }); - } - - // ── Build proxied service bundle ────────────────────────────────────────── - const telegram = { - async sendMessage(chatId, text, opts = {}) { - return callMainService("telegram.sendMessage", [chatId, text, opts]); - }, - }; - - const agentPool = { - async launchEphemeralThread(prompt, cwd, timeout, opts) { - return callMainService("agentPool.launchEphemeralThread", [prompt, cwd, timeout, opts]); - }, - async launchOrResumeThread(prompt, cwd, opts) { - return callMainService("agentPool.launchOrResumeThread", [prompt, cwd, opts]); - }, - async execWithRetry(taskKey, fn, opts) { - /* execWithRetry takes a function — not serialisable; call main-thread shim */ - return callMainService("agentPool.execWithRetry", [taskKey, null, opts]); - }, - async continueSession(sessionId, prompt, opts) { - return callMainService("agentPool.continueSession", [sessionId, prompt, opts]); - }, - async killSession(sessionId) { - return callMainService("agentPool.killSession", [sessionId]); - }, - }; - - const kanban = { - async createTask(projectIdOrData, taskData) { - return callMainService("kanban.createTask", [projectIdOrData, taskData]); - }, - async updateTaskStatus(taskId, status, opts) { - return callMainService("kanban.updateTaskStatus", [taskId, status, opts]); - }, - async listTasks(projectId, filters) { - return callMainService("kanban.listTasks", [projectId, filters]); - }, - async getTask(taskId) { - return callMainService("kanban.getTask", [taskId]); - }, - }; - - const meeting = { - async schedule(...args) { return callMainService("meeting.schedule", args); }, - async cancel(...args) { return callMainService("meeting.cancel", args); }, - async 
get(...args) { return callMainService("meeting.get", args); }, - }; - - const services = { telegram, agentPool, kanban, meeting }; - - // ── Create engine ───────────────────────────────────────────────────────── - engine = wfEngineMod.getWorkflowEngine({ - workflowDir: cfg.workflowDir, - runsDir: cfg.runsDir, - services, - detectInterruptedRuns: false, - }); - - // ── Forward engine events to main thread ────────────────────────────────── - const FORWARDED_EVENTS = [ - "run:start", "run:end", "run:error", "run:cancel:requested", - "node:start", "node:complete", "node:error", "node:skip", - "edge:flow", - ]; - for (const eventName of FORWARDED_EVENTS) { - engine.on(eventName, (payload) => { - try { - parentPort.postMessage({ type: "event", eventName, payload: sanitise(payload) }); - } catch { /* best-effort */ } - }); - } - - // ── Install recommended templates ───────────────────────────────────────── - if (typeof wfTemplatesMod?.installRecommendedWorkflowTemplates === "function") { - try { - await wfTemplatesMod.installRecommendedWorkflowTemplates(engine); - } catch { /* non-fatal */ } - } - - // ── Resume interrupted runs ──────────────────────────────────────────────── - if (typeof engine.resumeInterruptedRuns === "function") { - setTimeout(() => engine.resumeInterruptedRuns().catch(() => {}), 0); - } -} - -// ── Serialize engine return values for structured clone ──────────────────────── -function sanitise(value, depth = 0) { - if (depth > 8 || value === null || value === undefined) return value; - if (typeof value === "function") return "[Function]"; - if (typeof value !== "object") return value; - if (Array.isArray(value)) return value.map((v) => sanitise(v, depth + 1)); - const out = {}; - for (const [k, v] of Object.entries(value)) { - if (k.startsWith("_") && depth > 0) continue; - out[k] = sanitise(v, depth + 1); - } - return out; -} - -// ── Engine method dispatcher ────────────────────────────────────────────────── -async function dispatch(method, 
args) { - if (!engine) throw new Error("Workflow engine not yet initialised"); - switch (method) { - case "execute": { - const [workflowId, input, opts] = args; - const ctx = await engine.execute(workflowId, input, opts); - /* Return only the fields the main thread needs to avoid serialisation issues */ - return { - id: ctx?.id, - workflowId: ctx?.workflowId, - status: ctx?.status, - errors: ctx?.errors || [], - data: sanitise(ctx?.data), - }; - } - case "evaluateTriggers": - return engine.evaluateTriggers(...args); - case "get": - return sanitise(engine.get(...args)); - case "list": - return sanitise(engine.list(...args)); - case "getRunHistory": - return sanitise(await engine.getRunHistory?.(...args)); - case "getRunHistoryPage": - return sanitise(await engine.getRunHistoryPage?.(...args)); - case "getRunDetail": - return sanitise(await engine.getRunDetail?.(...args)); - case "getRunForensics": - return sanitise(await engine.getRunForensics?.(...args)); - case "getNodeForensics": - return sanitise(await engine.getNodeForensics?.(...args)); - case "getRetryOptions": - return sanitise(await engine.getRetryOptions?.(...args)); - case "retryRun": { - const ctx = await engine.retryRun(...args); - return { id: ctx?.id, workflowId: ctx?.workflowId, status: ctx?.status, errors: ctx?.errors || [] }; - } - case "restoreFromSnapshot": { - const ctx = await engine.restoreFromSnapshot?.(...args); - return { id: ctx?.id, workflowId: ctx?.workflowId, status: ctx?.status, errors: ctx?.errors || [] }; - } - case "cancelRun": - return engine.cancelRun?.(...args); - case "createRunSnapshot": - return engine.createRunSnapshot?.(...args); - case "listSnapshots": - return sanitise(engine.listSnapshots?.(...args)); - case "save": - return engine.save(...args); - case "import": - return sanitise(engine.import(...args)); - case "delete": - return engine.delete?.(...args); - case "getConcurrencyStats": - return sanitise(engine.getConcurrencyStats?.()); - case "getTaskTraceEvents": - 
return sanitise(await engine.getTaskTraceEvents?.(...args)); - case "load": - return engine.load?.(); - case "resumeInterruptedRuns": - return engine.resumeInterruptedRuns?.(); - case "registerTaskTraceHook": - /* Hooks cannot cross thread boundaries; silently ignore */ - return null; - default: - throw new Error(`Unknown engine method: ${method}`); - } -} - -// ── Message handler ──────────────────────────────────────────────────────────── -parentPort.on("message", async (msg) => { - if (!msg || typeof msg.type !== "string") return; - - if (msg.type === "init") { - try { - await initEngine(msg.workerData || {}); - parentPort.postMessage({ type: "ready" }); - } catch (err) { - parentPort.postMessage({ type: "error", callId: null, error: err.message, stack: err.stack }); - } - return; - } - - if (msg.type === "svc-res") { - const pending = pendingSvcCalls.get(msg.callId); - if (pending) { - pendingSvcCalls.delete(msg.callId); - if (msg.error) pending.reject(Object.assign(new Error(msg.error), { code: msg.code })); - else pending.resolve(msg.result); - } - return; - } - - if (msg.type === "call") { - const { callId, method, args } = msg; - try { - const result = await dispatch(method, args || []); - parentPort.postMessage({ type: "result", callId, result }); - } catch (err) { - parentPort.postMessage({ type: "error", callId, error: err.message, stack: err.stack }); - } - } -}); diff --git a/setup.mjs b/setup.mjs index 7cf00cdf2..dace073f8 100755 --- a/setup.mjs +++ b/setup.mjs @@ -1,3 +1,5 @@ +#!/usr/bin/env node + /** * bosun — Setup Wizard * @@ -24,7 +26,6 @@ import { resolve, dirname, basename, relative, isAbsolute } from "node:path"; import { execSync, spawnSync } from "node:child_process"; import { execFileSync } from "node:child_process"; import { fileURLToPath } from "node:url"; -import "./infra/windows-hidden-child-processes.mjs"; import { readCodexConfig, getConfigPath, @@ -58,10 +59,7 @@ import { resolveWorkflowTemplateIds, normalizeTemplateOverridesById, } 
from "./workflow/workflow-templates.mjs"; -import { - discoverTelegramPairingChat, - normalizeTelegramPairingCode, -} from "./telegram/get-telegram-chat-id.mjs"; +import { discoverTelegramChats } from "./telegram/get-telegram-chat-id.mjs"; const __dirname = dirname(fileURLToPath(import.meta.url)); @@ -91,126 +89,12 @@ function getVersion() { } } -const TELEGRAM_PAIRING_CODE_ALPHABET = "ABCDEFGHJKLMNPQRSTUVWXYZ23456789"; - -function generateTelegramPairingCode(randomFn = Math.random) { - let code = ""; - for (let i = 0; i < 6; i += 1) { - const idx = Math.floor(randomFn() * TELEGRAM_PAIRING_CODE_ALPHABET.length); - code += TELEGRAM_PAIRING_CODE_ALPHABET[idx] || TELEGRAM_PAIRING_CODE_ALPHABET[0]; - } - return code; -} - -function buildTelegramAllowedChatIds(primaryChatId, ...rawValues) { - const seen = new Set(); - const ordered = []; - - for (const rawValue of [primaryChatId, ...rawValues]) { - for (const value of String(rawValue || "") - .split(",") - .map((item) => item.trim()) - .filter(Boolean)) { - if (seen.has(value)) continue; - seen.add(value); - ordered.push(value); - } - } - - return ordered.join(","); -} - -function applyTelegramChatPairing(env, chatId, sourceEnv = process.env) { - const normalizedChatId = String(chatId || "").trim(); - if (!normalizedChatId) return ""; - - const existingAllowed = buildTelegramAllowedChatIds( - normalizedChatId, - env.TELEGRAM_ALLOWED_CHAT_IDS, - env.TELEGRAM_CHAT_ID, - sourceEnv.TELEGRAM_ALLOWED_CHAT_IDS, - sourceEnv.TELEGRAM_CHAT_IDS, - sourceEnv.TELEGRAM_CHAT_ID, - ); - - env.TELEGRAM_CHAT_ID = normalizedChatId; - env.TELEGRAM_ALLOWED_CHAT_IDS = existingAllowed; - return normalizedChatId; -} - -async function promptForTelegramPairedChat({ - prompt, - token, - env, - sourceEnv = process.env, -}) { - const pairingCode = normalizeTelegramPairingCode(generateTelegramPairingCode()); - - console.log("\n" + chalk.bold("Step 3: Pair Your Telegram Chat")); - console.log( - " Bosun will capture the right chat automatically and 
lock commands to it.\n", - ); - console.log(" 1. Open Telegram and DM your bot"); - console.log(" 2. Send this pairing code:"); - console.log(` ${chalk.cyan(pairingCode)}`); - console.log(" 3. Come back here and press enter so Bosun can finish pairing"); - console.log(); - - while (true) { - const ready = await prompt.confirm( - "I sent the pairing code to my bot. Pair now?", - true, - ); - if (!ready) break; - - info("Checking Telegram for the pairing code..."); - try { - const pairing = await discoverTelegramPairingChat(token, pairingCode); - if (pairing.chat?.id) { - const pairedChatId = applyTelegramChatPairing( - env, - pairing.chat.id, - sourceEnv, - ); - const pairedLabel = - pairing.chat.username - ? `@${pairing.chat.username}` - : pairing.chat.title || pairing.chat.type || pairedChatId; - info(`✓ Paired ${pairedLabel}`); - info("✓ Telegram commands are locked to the paired chat"); - console.log(); - return pairedChatId; - } - warn(pairing.message || "Pairing code not found yet."); - } catch (err) { - warn(`Failed to check Telegram pairing: ${err.message}`); - } - - console.log( - chalk.dim( - " If you already sent the code, wait a moment and try again. 
The fallback is manual entry.", - ), - ); - const retry = await prompt.confirm("Try pairing again?", true); - if (!retry) break; - console.log(); - } - - console.log(); - info("Falling back to manual chat ID entry."); - console.log( - chalk.dim( - " Tip: you can still use bosun-chat-id --pair or set TELEGRAM_CHAT_ID manually later.", - ), - ); - const manualChatId = await prompt.ask( - "Chat ID (leave empty to set this up later)", - "", - ); - if (manualChatId) { - applyTelegramChatPairing(env, manualChatId, sourceEnv); - } - return env.TELEGRAM_CHAT_ID || ""; +function formatTelegramChatChoice(chat) { + const parts = [String(chat.id)]; + if (chat.type) parts.push(chat.type); + if (chat.username) parts.push(`@${chat.username}`); + if (chat.title) parts.push(chat.title); + return parts.join(" · "); } function hasSetupMarkers(dir) { @@ -3881,8 +3765,8 @@ async function main() { if (!hasBotReady) { warn("No problem! You can set up Telegram later by:"); console.log(" 1. Adding TELEGRAM_BOT_TOKEN to .env"); - console.log(" 2. Re-running: bosun --setup to pair your chat"); - console.log(" 3. Or setting TELEGRAM_CHAT_ID / TELEGRAM_ALLOWED_CHAT_IDS manually"); + console.log(" 2. Adding TELEGRAM_CHAT_ID to .env"); + console.log(" 3. 
Or re-running: bosun --setup"); console.log(); } else { // Step 2: Get bot token @@ -3912,12 +3796,105 @@ async function main() { info("✓ Token format looks good"); } - await promptForTelegramPairedChat({ - prompt, - token: env.TELEGRAM_BOT_TOKEN, - env, - sourceEnv: process.env, - }); + // Step 3: Get chat ID + console.log("\n" + chalk.bold("Step 3: Get Your Chat ID")); + console.log(" Your chat ID tells the bot where to send messages."); + console.log(); + + const knowsChatId = await prompt.confirm( + "Do you already know your chat ID?", + false, + ); + + if (knowsChatId) { + env.TELEGRAM_CHAT_ID = await prompt.ask( + "Chat ID (numeric, e.g., 123456789)", + process.env.TELEGRAM_CHAT_ID || "", + ); + } else { + // Guide user to get chat ID + console.log("\n" + chalk.cyan("To get your chat ID:") + "\n"); + console.log( + " 1. Open Telegram and search for your bot's username", + ); + console.log( + " 2. Click " + + chalk.cyan("START") + + " or send any message (e.g., 'Hello')", + ); + console.log(" 3. Come back here and we'll detect your chat ID"); + console.log(); + + const ready = await prompt.confirm( + "Ready? (I've messaged my bot)", + false, + ); + + if (ready) { + // Try to fetch chat ID from Telegram API + info("Fetching your chat ID from Telegram..."); + try { + const discovery = await discoverTelegramChats(env.TELEGRAM_BOT_TOKEN); + + if (discovery.chats.length === 1) { + env.TELEGRAM_CHAT_ID = String(discovery.chats[0].id); + info(`✓ Found your chat ID: ${env.TELEGRAM_CHAT_ID}`); + console.log(); + } else if (discovery.chats.length > 1) { + const selectedIdx = await prompt.choose( + "Select the chat Bosun should use:", + discovery.chats.map(formatTelegramChatChoice), + 0, + ); + const selectedChat = discovery.chats[selectedIdx]; + env.TELEGRAM_CHAT_ID = String(selectedChat.id); + info(`✓ Selected chat ID: ${env.TELEGRAM_CHAT_ID}`); + console.log(); + } else { + warn( + discovery.message || + "Couldn't find a chat ID. 
Make sure you sent a message to your bot.", + ); + console.log( + chalk.dim( + " Or run: bosun-chat-id (after starting the bot)", + ), + ); + env.TELEGRAM_CHAT_ID = await prompt.ask( + "Enter chat ID manually (or leave empty to set up later)", + "", + ); + } + } catch (err) { + warn(`Failed to fetch chat ID: ${err.message}`); + console.log( + chalk.dim( + " You can run: bosun-chat-id (after starting the bot)", + ), + ); + env.TELEGRAM_CHAT_ID = await prompt.ask( + "Enter chat ID manually (or leave empty to set up later)", + "", + ); + } + } else { + console.log(); + info("No problem! You can get your chat ID later by:"); + console.log( + " • Running: " + + chalk.cyan("bosun-chat-id") + + " (after starting bosun)", + ); + console.log( + " • Or manually: " + + chalk.cyan( + "curl 'https://api.telegram.org/bot/getUpdates'", + ), + ); + console.log(" Then add TELEGRAM_CHAT_ID to .env"); + console.log(); + } + } // Step 4: Verify setup if (env.TELEGRAM_CHAT_ID) { @@ -6042,8 +6019,6 @@ async function runNonInteractive({ env.GITHUB_REPO = process.env.GITHUB_REPO || slug || ""; env.TELEGRAM_BOT_TOKEN = process.env.TELEGRAM_BOT_TOKEN || ""; env.TELEGRAM_CHAT_ID = process.env.TELEGRAM_CHAT_ID || ""; - env.TELEGRAM_ALLOWED_CHAT_IDS = - process.env.TELEGRAM_ALLOWED_CHAT_IDS || process.env.TELEGRAM_CHAT_IDS || ""; applyTelegramMiniAppDefaults(env, process.env); env.KANBAN_BACKEND = process.env.KANBAN_BACKEND || "internal"; env.KANBAN_SYNC_POLICY = @@ -6568,7 +6543,7 @@ async function writeConfigFiles({ env, configJson, repoRoot, configDir }) { if (commandExists("pnpm")) { execSync("pnpm install", { cwd: __dirname, stdio: "inherit" }); } else { - execSync(`${resolveNpmCommand()} install`, { cwd: __dirname, stdio: "inherit" }); + execSync("npm install", { cwd: __dirname, stdio: "inherit" }); } success("Dependencies installed"); } catch { @@ -6648,7 +6623,7 @@ async function writeConfigFiles({ env, configJson, repoRoot, configDir }) { const portalChild = _spawn( 
launcher.executable, launcher.args || [], - { detached: true, stdio: "ignore", windowsHide: true }, + { detached: true, stdio: "ignore", windowsHide: false }, ); portalChild.unref(); success("Bosun portal is opening..."); @@ -6767,9 +6742,6 @@ export async function runSetup() { export { applyTelegramMiniAppDefaults, - applyTelegramChatPairing, - buildTelegramAllowedChatIds, - generateTelegramPairingCode, normalizeTelegramUiPort, extractProjectNumber, resolveOrCreateGitHubProjectNumber, @@ -6794,5 +6766,3 @@ if (process.argv[1] && resolve(process.argv[1]) === resolve(__filename_setup)) { }); } - - diff --git a/shell/claude-shell.mjs b/shell/claude-shell.mjs index 2e3fdfda4..f479b096b 100644 --- a/shell/claude-shell.mjs +++ b/shell/claude-shell.mjs @@ -5,7 +5,6 @@ * long-lived session with steering support via streaming input mode. */ -import "../infra/windows-hidden-child-processes.mjs"; import { mkdir, readFile, writeFile } from "node:fs/promises"; import { existsSync, readFileSync } from "node:fs"; import { resolve } from "node:path"; diff --git a/shell/codex-config.mjs b/shell/codex-config.mjs index d430377dc..6240fe719 100644 --- a/shell/codex-config.mjs +++ b/shell/codex-config.mjs @@ -1095,26 +1095,13 @@ const COMMON_MCP_SERVER_DEFS = [ lines: [ "[mcp_servers.microsoft-docs]", 'url = "https://learn.microsoft.com/api/mcp"', + '# NOTE: Tool list intentionally limited to avoid Azure Responses API schema-size/parser issues.', + 'tools = ["microsoft_docs_search", "microsoft_code_sample_search"]', ], isPresent: hasMicrosoftDocsMcp, }, ]; -const COMMON_MCP_SERVER_SECTION_NAMES = Object.freeze([ - "context7", - "sequential-thinking", - "playwright", - "microsoft-docs", - "microsoft_docs", -]); - -function shouldIncludeDefaultMcpServers(env = process.env) { - const raw = String(env.BOSUN_MCP_ALLOW_DEFAULT_SERVERS || "") - .trim() - .toLowerCase(); - return ["1", "true", "yes", "on", "y"].includes(raw); -} - function buildCommonMcpBlock(definition) { return [ "", @@ 
-1124,10 +1111,7 @@ function buildCommonMcpBlock(definition) { ].join("\n"); } -export function buildCommonMcpBlocks(env = process.env) { - if (!shouldIncludeDefaultMcpServers(env)) { - return ""; - } +export function buildCommonMcpBlocks() { return COMMON_MCP_SERVER_DEFS.map(buildCommonMcpBlock).join(""); } @@ -1137,45 +1121,6 @@ function hasNamedMcpServer(toml, name) { ); } -function stripNamedMcpSection(toml, name) { - const header = `[mcp_servers.${name}]`; - const headerIdx = toml.indexOf(header); - if (headerIdx === -1) { - return { toml, changed: false }; - } - - const lineStart = toml.lastIndexOf("\n", headerIdx); - let removeFrom = lineStart === -1 ? 0 : lineStart + 1; - const prefix = toml.slice(0, removeFrom); - const commentMatch = prefix.match(/(^|\n)# ── Common MCP servers \(added by bosun\) ──\s*\n$/); - if (commentMatch) { - removeFrom = prefix.length - commentMatch[0].length + (commentMatch[1] === "\n" ? 1 : 0); - } - - const afterHeader = headerIdx + header.length; - const nextSection = toml.indexOf("\n[", afterHeader); - const sectionEnd = nextSection === -1 ? 
toml.length : nextSection + 1; - const nextToml = `${toml.slice(0, removeFrom)}${toml.slice(sectionEnd)}`.replace(/\n{3,}/g, "\n\n"); - return { toml: nextToml, changed: nextToml !== toml }; -} - -export function stripCommonMcpServerBlocks(toml) { - let nextToml = String(toml || ""); - let changed = false; - for (const name of COMMON_MCP_SERVER_SECTION_NAMES) { - while (true) { - const stripped = stripNamedMcpSection(nextToml, name); - if (!stripped.changed) break; - nextToml = stripped.toml; - changed = true; - } - } - return { - toml: nextToml, - changed, - }; -} - function ensureMcpStartupTimeout(toml, name, timeoutSec = 120) { const header = `[mcp_servers.${name}]`; const headerIdx = toml.indexOf(header); @@ -1207,30 +1152,6 @@ function ensureMcpStartupTimeout(toml, name, timeoutSec = 120) { }; } -function stripUnsupportedMicrosoftDocsToolsConfig(toml) { - let nextToml = String(toml || ""); - for (const name of ["microsoft-docs", "microsoft_docs"]) { - const header = `[mcp_servers.${name}]`; - const headerIdx = nextToml.indexOf(header); - if (headerIdx === -1) continue; - - const afterHeader = headerIdx + header.length; - const nextSection = nextToml.indexOf("\n[", afterHeader); - const sectionEnd = nextSection === -1 ? 
nextToml.length : nextSection; - const section = nextToml.substring(afterHeader, sectionEnd); - const cleaned = section.replace( - /^\s*tools\s*=\s*\[[^\n]*\]\s*(?:\r?\n)?/gm, - "", - ); - - if (cleaned !== section) { - nextToml = - nextToml.substring(0, afterHeader) + cleaned + nextToml.substring(sectionEnd); - } - } - return nextToml; -} - function stripDeprecatedSandboxPermissions(toml) { return String(toml || "").replace( /^\s*sandbox_permissions\s*=.*(?:\r?\n)?/gim, @@ -1581,14 +1502,7 @@ function applyAgentSdkDefaults(toml, env, primarySdk, result) { return nextToml; } -function ensureCommonMcpDefaults(toml, result, env = process.env) { - if (!shouldIncludeDefaultMcpServers(env)) { - const stripped = stripCommonMcpServerBlocks(toml); - if (stripped.changed) { - result.commonMcpRemoved = true; - } - return stripped.toml; - } +function ensureCommonMcpDefaults(toml, result) { let nextToml = toml; for (const definition of COMMON_MCP_SERVER_DEFS) { if (!definition.isPresent(nextToml)) { @@ -1676,9 +1590,7 @@ function initializeCodexConfigState(result) { } return { originalToml, - toml: stripUnsupportedMicrosoftDocsToolsConfig( - stripDeprecatedSandboxPermissions(originalToml), - ), + toml: stripDeprecatedSandboxPermissions(originalToml), }; } @@ -1692,7 +1604,7 @@ function applyEnsureCodexConfigDefaults(toml, env, primarySdk, result) { result.featuresAdded = featureResult.added; nextToml = featureResult.toml; - nextToml = ensureCommonMcpDefaults(nextToml, result, env); + nextToml = ensureCommonMcpDefaults(nextToml, result); nextToml = applyModelProviderDefaults(nextToml, env, result); return { sandboxState, toml: nextToml }; @@ -1853,3 +1765,4 @@ function parseBoolEnv(value) { } + diff --git a/shell/codex-model-profiles.mjs b/shell/codex-model-profiles.mjs index 8210b587a..8956b8d4f 100644 --- a/shell/codex-model-profiles.mjs +++ b/shell/codex-model-profiles.mjs @@ -9,28 +9,6 @@ function clean(value) { return String(value ?? 
"").trim(); } -function trimTrailingSlashes(value) { - let normalized = String(value ?? ""); - let end = normalized.length; - while (end > 0 && normalized[end - 1] === "/") { - end -= 1; - } - return end === normalized.length ? normalized : normalized.slice(0, end); -} - -export function resolveCodexHomeDir(envInput = process.env) { - const home = - clean(envInput?.HOME) || - clean(envInput?.USERPROFILE) || - ( - clean(envInput?.HOMEDRIVE) && clean(envInput?.HOMEPATH) - ? `${clean(envInput.HOMEDRIVE)}${clean(envInput.HOMEPATH)}` - : "" - ) || - clean(homedir()); - return home; -} - function isAzureOpenAIBaseUrl(value) { try { const parsed = value instanceof URL ? value : new URL(String(value || "")); @@ -52,21 +30,12 @@ function normalizeAzureOpenAIBaseUrl(value) { parsed.pathname = "/openai/v1"; parsed.search = ""; parsed.hash = ""; - return trimTrailingSlashes(parsed.toString()); + return parsed.toString().replace(/\/+$/, ""); } catch { return raw; } } -function normalizeProviderBaseUrlForComparison(value, providerKind = "openai") { - const raw = clean(value); - if (!raw) return ""; - if (providerKind === "azure") { - return normalizeAzureOpenAIBaseUrl(raw); - } - return trimTrailingSlashes(raw); -} - function normalizeProfileName(value, fallback = DEFAULT_ACTIVE_PROFILE) { const raw = clean(value).toLowerCase(); if (!raw) return fallback; @@ -96,7 +65,7 @@ function hasEnvValue(env, key) { return Boolean(key && clean(env?.[key])); } -export function getProviderEndpointEnvKeys(sectionName, providerKind) { +function getProviderEndpointEnvKeys(sectionName, providerKind) { const normalizedName = clean(sectionName).toUpperCase().replace(/[^A-Z0-9]+/g, "_"); if (providerKind === "azure") { const keys = ["AZURE_OPENAI_ENDPOINT"]; @@ -161,9 +130,9 @@ function profileRecord(env, profileName, globalProvider) { }; } -export function readCodexConfigRuntimeDefaults(envInput = process.env) { +export function readCodexConfigRuntimeDefaults() { try { - const configPath = 
resolve(resolveCodexHomeDir(envInput), ".codex", "config.toml"); + const configPath = resolve(homedir(), ".codex", "config.toml"); if (!existsSync(configPath)) { return { model: "", modelProvider: "", providers: {} }; } @@ -195,8 +164,8 @@ export function readCodexConfigRuntimeDefaults(envInput = process.env) { } } -function readCodexConfigTopLevelModel(envInput = process.env) { - return readCodexConfigRuntimeDefaults(envInput).model; +function readCodexConfigTopLevelModel() { + return readCodexConfigRuntimeDefaults().model; } function selectConfigProviderForRuntime(configDefaults, env, preferredProvider = "") { @@ -210,18 +179,12 @@ function selectConfigProviderForRuntime(configDefaults, env, preferredProvider = const matchingEntries = preferred ? entries.filter((section) => section.provider === preferred) : entries; - const normalizedRuntimeBaseUrl = normalizeProviderBaseUrlForComparison( - runtimeBaseUrl, - preferred || inferProviderKindFromSection("", { baseUrl: runtimeBaseUrl }, "openai"), - ); const baseUrlMatchedEntries = runtimeBaseUrl - ? matchingEntries.filter((section) => - normalizeProviderBaseUrlForComparison(section.baseUrl, section.provider) === normalizedRuntimeBaseUrl) + ? matchingEntries.filter((section) => clean(section.baseUrl) === runtimeBaseUrl) : []; const envBackedEntries = matchingEntries.filter((section) => providerRuntimeConfigured(env, section)); const baseUrlMatchedEnvBackedEntries = runtimeBaseUrl - ? envBackedEntries.filter((section) => - normalizeProviderBaseUrlForComparison(section.baseUrl, section.provider) === normalizedRuntimeBaseUrl) + ? envBackedEntries.filter((section) => clean(section.baseUrl) === runtimeBaseUrl) : []; const preferredNames = preferred === "azure" ? 
["azure"] @@ -245,9 +208,7 @@ function selectConfigProviderForRuntime(configDefaults, env, preferredProvider = ), }; const preferredMatches = !preferred || configured.provider === preferred; - const baseUrlMatches = !runtimeBaseUrl || - normalizeProviderBaseUrlForComparison(configured.baseUrl, configured.provider) - === normalizedRuntimeBaseUrl; + const baseUrlMatches = !runtimeBaseUrl || clean(configured.baseUrl) === runtimeBaseUrl; if (preferredMatches && baseUrlMatches && providerRuntimeConfigured(env, configured)) { return configured; } @@ -266,21 +227,8 @@ function selectConfigProviderForRuntime(configDefaults, env, preferredProvider = } function inferGlobalProvider(env, configDefaults = null) { - const baseUrl = clean(env.OPENAI_BASE_URL); - if (baseUrl) { - if (isAzureOpenAIBaseUrl(baseUrl)) return "azure"; - const configured = selectConfigProviderForRuntime(configDefaults, env); - if (configured && clean(configured.baseUrl) === baseUrl) { - return configured.provider; - } - return "openai"; - } - if (hasEnvValue(env, "AZURE_OPENAI_API_KEY")) { - return "azure"; - } - if (hasEnvValue(env, "OPENAI_API_KEY")) { - return "openai"; - } + const baseUrl = clean(env.OPENAI_BASE_URL).toLowerCase(); + if (isAzureOpenAIBaseUrl(baseUrl)) return "azure"; const configured = selectConfigProviderForRuntime(configDefaults, env); return configured?.provider || "openai"; } @@ -293,7 +241,7 @@ function inferGlobalProvider(env, configDefaults = null) { */ export function resolveCodexProfileRuntime(envInput = process.env) { const sourceEnv = { ...envInput }; - const configDefaults = readCodexConfigRuntimeDefaults(sourceEnv); + const configDefaults = readCodexConfigRuntimeDefaults(); const activeProfile = normalizeProfileName( sourceEnv.CODEX_MODEL_PROFILE, DEFAULT_ACTIVE_PROFILE, @@ -309,7 +257,7 @@ export function resolveCodexProfileRuntime(envInput = process.env) { const env = { ...sourceEnv }; - const configModel = readCodexConfigTopLevelModel(sourceEnv); + const configModel 
= readCodexConfigTopLevelModel(); if (active.model) { env.CODEX_MODEL = active.model; diff --git a/shell/codex-shell.mjs b/shell/codex-shell.mjs index 740b85cb8..bd22e550c 100644 --- a/shell/codex-shell.mjs +++ b/shell/codex-shell.mjs @@ -12,18 +12,16 @@ * thread_id so we can resume the same conversation across restarts. */ -import "../infra/windows-hidden-child-processes.mjs"; import { readFile, writeFile, mkdir, readdir } from "node:fs/promises"; import { resolve } from "node:path"; -import { fileURLToPath, pathToFileURL } from "node:url"; -import { resolveAgentSdkConfig, resolveCodexSdkInstall } from "../agent/agent-sdk.mjs"; +import { fileURLToPath } from "node:url"; +import { resolveAgentSdkConfig } from "../agent/agent-sdk.mjs"; import { loadConfig } from "../config/config.mjs"; import { maybeCompressSessionItems } from "../workspace/context-cache.mjs"; import { resolveRepoRoot } from "../config/repo-root.mjs"; import { resolveCodexProfileRuntime, readCodexConfigRuntimeDefaults, - getProviderEndpointEnvKeys, } from "./codex-model-profiles.mjs"; import { buildTaskWritableRoots } from "./codex-config.mjs"; import { @@ -129,7 +127,6 @@ function buildInjectedSandboxConfig(envInput, workingDirectory) { function buildCodexSdkRuntime(streamProviderOverrides, envInput = process.env, workingDirectory = DEFAULT_WORKING_DIRECTORY) { const resolved = resolveCodexProfileRuntime(envInput); const { env: resolvedEnv, configProvider } = resolved; - const runtimeDefaults = readCodexConfigRuntimeDefaults(envInput) || {}; const baseUrl = resolvedEnv.OPENAI_BASE_URL || ""; const isAzure = isAzureOpenAIBaseUrl(baseUrl); const hasCustomBaseUrl = Boolean(String(baseUrl || "").trim()); @@ -164,16 +161,6 @@ function buildCodexSdkRuntime(streamProviderOverrides, envInput = process.env, w if (!unsetEnvKeys.includes(otherEnvKey)) { unsetEnvKeys.push(otherEnvKey); } - // Also remove endpoint/base URL env keys associated with the non-selected provider - const endpointKeys = 
getProviderEndpointEnvKeys(sectionName, "azure"); - for (const epKey of endpointKeys) { - if (epKey in env) { - delete env[epKey]; - if (!unsetEnvKeys.includes(epKey)) { - unsetEnvKeys.push(epKey); - } - } - } } } catch { // best effort — do not block SDK startup if config inspection fails @@ -181,11 +168,6 @@ function buildCodexSdkRuntime(streamProviderOverrides, envInput = process.env, w } const providerName = isAzure ? "azure" : "openai"; - const providerSectionNameResolved = isAzure - ? providerSectionName - : hasCustomBaseUrl - ? (configProvider?.name || "openai-direct") - : null; const config = isAzure ? { model_providers: { @@ -203,22 +185,18 @@ function buildCodexSdkRuntime(streamProviderOverrides, envInput = process.env, w } : hasCustomBaseUrl ? { - model_providers: providerSectionNameResolved - ? { - [providerSectionNameResolved]: { - ...streamProviderOverrides, - }, - } - : undefined, + model_providers: { + [providerSectionName]: { + ...streamProviderOverrides, + }, + }, } : {}; Object.assign(config, buildInjectedSandboxConfig(envInput, workingDirectory)); - if (providerSectionNameResolved) { - config.model_provider = providerSectionNameResolved; - } - if (env.CODEX_MODEL) { + if (isAzure && env.CODEX_MODEL) { + config.model_provider = providerSectionName; config.model = env.CODEX_MODEL; } @@ -360,7 +338,6 @@ const DEFAULT_WORKING_DIRECTORY = REPO_ROOT; // ── State ──────────────────────────────────────────────────────────────────── let CodexClass = null; // The Codex class from SDK -const CODEX_SDK_SPECIFIER = "@openai/codex-sdk"; // Define the SDK specifier let codexInstance = null; // Singleton Codex instance let activeThread = null; // Current persistent Thread let activeThreadId = null; // Thread ID for resume @@ -408,17 +385,6 @@ function resolveCodexTransport() { return "auto"; } -function shouldUseBareCodexSdkImport() { - return Boolean(import.meta.vitest || process.env.VITEST); -} - -async function importCodexSdkModule(resolvedSdk) { - if 
(shouldUseBareCodexSdkImport()) { - return import(CODEX_SDK_SPECIFIER); - } - return import(pathToFileURL(resolvedSdk.entryPath).href); -} - // ── SDK Loading ────────────────────────────────────────────────────────────── async function loadCodexSdk() { @@ -437,14 +403,9 @@ async function loadCodexSdk() { } if (CodexClass) return CodexClass; try { - const resolvedSdk = resolveCodexSdkInstall({ extraRoots: [getWorkingDirectory()] }); - if (!resolvedSdk?.entryPath) { - console.error("[codex-shell] failed to load SDK: no complete @openai/codex-sdk install found"); - return null; - } - const mod = await importCodexSdkModule(resolvedSdk); + const mod = await import("@openai/codex-sdk"); CodexClass = mod.Codex; - console.log(`[codex-shell] SDK loaded successfully from ${resolvedSdk.rootDir}`); + console.log("[codex-shell] SDK loaded successfully"); return CodexClass; } catch (err) { console.error(`[codex-shell] failed to load SDK: ${err.message}`); @@ -580,19 +541,6 @@ You have FULL ACCESS to: - File read/write: read any file, create/edit any file - MCP servers configured in this environment (availability varies) -## File Editing Strategy — IMPORTANT - -When editing files, always prefer the Bosun MCP file tools (available via the bosun MCP server): - -1. **LOCATE first** — use \`grep_search\` to find the exact code location before editing. -2. **READ before editing** — use \`read_file\` to confirm the exact text including whitespace. -3. **PREFER surgical edits** — use \`str_replace_editor\` for targeted changes. - - \`old_str\` must exactly match the file content (copy from \`read_file\` output). - - Include more surrounding lines if the text is not unique. -4. **Full rewrites only when necessary** — use \`write_file\` for new files or complete rewrites. -5. **NEVER use shell workarounds** to edit files (no \`node -e\`, no \`sed -i\`, no temp scripts, no patch files). - These break on Windows due to encoding and quoting issues. The MCP tools handle encoding correctly. 
- Key files: ${REPO_ROOT} — Repository root .cache/orchestrator-status.json — Live status data (if enabled) @@ -653,9 +601,6 @@ async function getThread() { codexInstance = new Cls({ config: { ...runtime.config, - model_provider: runtime.config?.model_provider, - model_providers: runtime.config?.model_providers, - model: runtime.config?.model, features: { ...(runtime.config?.features || {}), child_agents_md: true, @@ -1363,4 +1308,3 @@ export async function initCodexShell() { ); } } - diff --git a/shell/copilot-shell.mjs b/shell/copilot-shell.mjs index 1a4db9f06..692e033ab 100644 --- a/shell/copilot-shell.mjs +++ b/shell/copilot-shell.mjs @@ -7,12 +7,11 @@ * as the primary executor. */ -import "../infra/windows-hidden-child-processes.mjs"; import { existsSync, readFileSync, appendFileSync, mkdirSync, copyFileSync } from "node:fs"; import { readFile, writeFile, mkdir } from "node:fs/promises"; import { dirname, resolve } from "node:path"; import { fileURLToPath } from "node:url"; -import { execSync as nodeExecSync } from "node:child_process"; +import { execSync } from "node:child_process"; import { createRequire } from "node:module"; import { resolveRepoRoot } from "../config/repo-root.mjs"; import { loadConfig } from "../config/config.mjs"; @@ -24,16 +23,18 @@ import { } from "../infra/stream-resilience.mjs"; import { maybeCompressSessionItems } from "../workspace/context-cache.mjs"; -function execSync(command, options = {}) { - return nodeExecSync(command, { - ...options, - windowsHide: options.windowsHide ?? (process.platform === "win32"), - }); -} - const __dirname = resolve(fileURLToPath(new URL(".", import.meta.url))); const require = createRequire(import.meta.url); +// Lazy-import MCP registry — cached at module scope per AGENTS.md rules. 
+let _mcpRegistry = null; +async function getMcpRegistry() { + if (!_mcpRegistry) { + _mcpRegistry = await import("../workflow/mcp-registry.mjs"); + } + return _mcpRegistry; +} + // ── Configuration ──────────────────────────────────────────────────────────── const DEFAULT_TIMEOUT_MS = 60 * 60 * 1000; // 60 min for agentic tasks @@ -75,10 +76,6 @@ function getMcpRuntimeConfig() { } } -function shouldAllowExternalMcpSources(mcpCfg = getMcpRuntimeConfig()) { - return mcpCfg?.allowExternalSources === true; -} - function resolveCopilotTransport() { const raw = String(process.env.COPILOT_TRANSPORT || "auto") .trim() @@ -316,6 +313,33 @@ async function buildCliArgs() { args.push("--additional-mcp-config", mcpConfigPath); } + // Also write a temp MCP config from the library if installed servers exist + // (non-fatal: library MCP is a convenience, not a hard requirement) + if (!mcpConfigPath) { + try { + const registry = await getMcpRegistry(); + const mcpCfg = getMcpRuntimeConfig(); + const installed = await registry.listInstalledMcpServers(REPO_ROOT); + const ids = installed && installed.length ? 
installed.map((e) => e.id) : []; + let resolved = await registry.resolveMcpServersForAgent(REPO_ROOT, ids); + if (typeof registry.wrapServersWithDiscoveryProxy === "function") { + resolved = registry.wrapServersWithDiscoveryProxy(REPO_ROOT, resolved, { + enabled: mcpCfg.useDiscoveryProxy !== false, + includeCustomTools: mcpCfg.includeCustomToolsInDiscoveryProxy !== false, + cacheTtlMs: mcpCfg.discoveryProxyCacheTtlMs, + executeTimeoutMs: mcpCfg.discoveryProxyExecuteTimeoutMs, + }); + } + if (resolved && resolved.length) { + const tmpPath = registry.writeTempCopilotMcpConfig(REPO_ROOT, resolved); + args.push("--additional-mcp-config", tmpPath); + console.log(`[copilot-shell] injected ${resolved.length} library MCP server(s) via CLI args`); + } + } catch (err) { + console.warn(`[copilot-shell] failed to inject library MCP servers into CLI args: ${err.message}`); + } + } + if (args.length > 0) { console.log(`[copilot-shell] cliArgs: ${args.join(" ")}`); } @@ -712,9 +736,6 @@ function loadMcpServersFromFile(path) { } function loadMcpServers(profile = null) { - if (!shouldAllowExternalMcpSources()) { - return null; - } if (profile?.mcpServers) return profile.mcpServers; if (profile?.mcpConfig) { return loadMcpServersFromFile(profile.mcpConfig); @@ -728,6 +749,53 @@ function loadMcpServers(profile = null) { return loadMcpServersFromFile(configPath); } +/** + * Merge installed MCP library servers into an existing mcpServers map. + * Called during session build to inject library-managed MCP servers into + * the Copilot SDK session alongside any profile/env servers. + * + * Non-fatal: if the registry is unavailable or encounters errors, the + * original servers map is returned unchanged. 
+ * + * @param {Object|null} existingServers — mcpServers from profile/env/config + * @returns {Promise} — merged servers map + */ +async function mergeLibraryMcpServers(existingServers) { + try { + const registry = await getMcpRegistry(); + const mcpCfg = getMcpRuntimeConfig(); + const installed = await registry.listInstalledMcpServers(REPO_ROOT); + const installedIds = installed && installed.length ? installed.map((e) => e.id) : []; + + // Resolve all installed servers into full configs + let resolved = await registry.resolveMcpServersForAgent(REPO_ROOT, installedIds); + if (typeof registry.wrapServersWithDiscoveryProxy === "function") { + resolved = registry.wrapServersWithDiscoveryProxy(REPO_ROOT, resolved, { + enabled: mcpCfg.useDiscoveryProxy !== false, + includeCustomTools: mcpCfg.includeCustomToolsInDiscoveryProxy !== false, + cacheTtlMs: mcpCfg.discoveryProxyCacheTtlMs, + executeTimeoutMs: mcpCfg.discoveryProxyExecuteTimeoutMs, + }); + } + if (!resolved || !resolved.length) return existingServers; + + // Convert to Copilot mcpServers format: { [id]: { command, args, env? } | { url } } + const copilotJson = registry.buildCopilotMcpJson(resolved); + const libraryServers = copilotJson?.mcpServers || {}; + if (!Object.keys(libraryServers).length) return existingServers; + + // Merge: existing servers take precedence over library ones (user overrides win) + const merged = { ...libraryServers, ...(existingServers || {}) }; + console.log( + `[copilot-shell] Merged ${Object.keys(libraryServers).length} library MCP server(s) into session`, + ); + return merged; + } catch (err) { + console.warn(`[copilot-shell] Failed to merge library MCP servers: ${err.message}`); + return existingServers; + } +} + async function buildSessionConfig() { const profile = resolveCopilotProfile(); const config = { @@ -763,9 +831,9 @@ async function buildSessionConfig() { config.reasoningEffort = effort.toLowerCase(); } - // Only explicit profile/env/config MCP sources are loaded here. 
- // Bosun-managed per-agent MCP selection is injected by the agent pool. - const mcpServers = loadMcpServers(profile); + // Load MCP servers from profile/env/config, then merge library-managed servers + const baseServers = loadMcpServers(profile); + const mcpServers = await mergeLibraryMcpServers(baseServers); if (mcpServers) config.mcpServers = mcpServers; return config; } diff --git a/shell/gemini-shell.mjs b/shell/gemini-shell.mjs index de145e9ae..28e610005 100644 --- a/shell/gemini-shell.mjs +++ b/shell/gemini-shell.mjs @@ -11,7 +11,6 @@ * cli -> CLI only */ -import "../infra/windows-hidden-child-processes.mjs"; import { spawn } from "node:child_process"; import { mkdir, readFile, writeFile } from "node:fs/promises"; import { resolve } from "node:path"; diff --git a/shell/opencode-providers.mjs b/shell/opencode-providers.mjs index dbda32bf9..2fab4cf29 100644 --- a/shell/opencode-providers.mjs +++ b/shell/opencode-providers.mjs @@ -1,4 +1,3 @@ -// CLAUDE:SUMMARY — discovers OpenCode providers/models via SDK and CLI fallbacks, normalizing snapshots and tolerating ignorable model-listing failures. 
/** * opencode-providers.mjs — Dynamic OpenCode provider & model discovery * @@ -13,34 +12,10 @@ */ import { execFile, exec } from "node:child_process"; +import { promisify } from "node:util"; -function execFileAsync(...args) { - return new Promise((resolve, reject) => { - execFile(...args, (error, stdout = "", stderr = "") => { - if (error) { - if (stdout !== undefined) error.stdout = stdout; - if (stderr !== undefined) error.stderr = stderr; - reject(error); - return; - } - resolve({ stdout, stderr }); - }); - }); -} - -function execAsync(...args) { - return new Promise((resolve, reject) => { - exec(...args, (error, stdout = "", stderr = "") => { - if (error) { - if (stdout !== undefined) error.stdout = stdout; - if (stderr !== undefined) error.stderr = stderr; - reject(error); - return; - } - resolve({ stdout, stderr }); - }); - }); -} +const execFileAsync = promisify(execFile); +const execAsync = promisify(exec); // ── Module-scope cache (lives at module scope per AGENTS.md) ────────────────── @@ -106,14 +81,13 @@ function shouldRetryProviderQueryWithoutDirectory(err) { if (status === 400) return true; const message = String(err?.message || "").toLowerCase(); - const stderrText = String(err?.stderr || "").toLowerCase(); const responseText = String( err?.response?.data?.error?.message || err?.response?.data?.message || err?.cause?.message || "", ).toLowerCase(); - const haystack = `${message} ${stderrText} ${responseText}`; + const haystack = `${message} ${responseText}`; return ( haystack.includes(" 400") || haystack.includes("failed to list models: 400") || @@ -129,22 +103,19 @@ function shouldRetryProviderQueryWithoutDirectory(err) { function isIgnorableModelDiscoveryError(err) { if (!err) return false; - if (isIgnorableModelDiscoveryCause(err?.cause)) return true; - const status = Number( err?.status ?? err?.response?.status ?? err?.cause?.status ?? 
NaN, ); if (status === 400) return true; const message = String(err?.message || "").toLowerCase(); - const stderrText = String(err?.stderr || "").toLowerCase(); const responseText = String( err?.response?.data?.error?.message || err?.response?.data?.message || err?.cause?.message || "", ).toLowerCase(); - const haystack = `${message} ${stderrText} ${responseText}`; + const haystack = `${message} ${responseText}`; return ( haystack.includes("failed to list models: 400") || haystack.includes("bad request") || @@ -156,35 +127,6 @@ function isIgnorableModelDiscoveryError(err) { ); } -function isIgnorableModelDiscoveryCause(cause) { - if (!cause) return false; - - const code = String(cause?.code || "").toUpperCase(); - const statusCode = Number(cause?.statusCode ?? cause?.status ?? cause?.response?.status ?? cause?.response?.statusCode ?? cause?.response?.data?.status ?? cause?.response?.data?.statusCode ?? NaN); - if (statusCode === 400) return true; - - const bodyText = String( - cause?.body - ?? cause?.responseBody - ?? cause?.response?.body - ?? cause?.response?.data?.error?.message - ?? cause?.response?.data?.message - ?? 
"", - ).toLowerCase(); - const message = String(cause?.message || "").toLowerCase(); - const haystack = `${code} ${message} ${bodyText}`; - return ( - haystack.includes("failed to list models: 400") || - haystack.includes(" 400") || - haystack.includes("status code 400") || - haystack.includes("bad request") || - haystack.includes("/models") || - haystack.includes("invalid url") || - haystack.includes("deployment") || - haystack.includes("api version") - ); -} - function buildEmptySnapshot() { return { providers: [], @@ -196,56 +138,6 @@ function buildEmptySnapshot() { }; } -function hasIgnorableCliStderr(text) { - const stderrText = String(text || "").trim(); - if (!stderrText) return false; - return isIgnorableModelDiscoveryError({ - message: stderrText, - stderr: stderrText, - }); -} - -function hasIgnorableModelDiscoverySignal(err) { - if (isIgnorableModelDiscoveryError(err)) return true; - - const stderrText = String(err?.stderr || "").trim(); - const stdoutText = String(err?.stdout || "").trim(); - return ( - !stdoutText - && !!stderrText - && isIgnorableModelDiscoveryError({ - message: stderrText, - stderr: stderrText, - status: err?.status, - response: err?.response, - cause: err?.cause, - }) - ); -} - -function hasIgnorableModelDiscoveryText(stdout = "", stderr = "") { - const stdoutText = String(stdout || "").trim(); - const stderrText = String(stderr || "").trim(); - if (!stderrText) return false; - - return isIgnorableModelDiscoveryError({ - message: !stdoutText ? stderrText : "", - stdout: stdoutText, - stderr: stderrText, - }); -} - -function extractRecoverySnapshot(err) { - const payload = err?.response?.data; - if (!payload || typeof payload !== "object") return null; - try { - const snapshot = normalizeSDKProviders(payload); - return isEmptySnapshot(snapshot) ? 
null : snapshot; - } catch { - return null; - } -} - function isEmptySnapshot(snapshot) { return Boolean( snapshot @@ -320,12 +212,16 @@ async function invokeProviderEndpoint(endpoint, requestOptions, context = null) return await callEndpoint(requestOptions); } catch (err) { if (!shouldRetryProviderQueryWithoutDirectory(err)) { - throw err; + return null; } } } - return await callEndpoint(); + try { + return await callEndpoint(); + } catch { + return null; + } } function normalizeProviderListData(data) { @@ -394,42 +290,28 @@ async function discoverViaSDK(existingClient = null) { ? { query: { directory } } : undefined; - const providerPromise = invokeProviderEndpoint(client?.provider?.list, requestOptions, client?.provider); - const authPromise = invokeProviderEndpoint(client?.provider?.auth, requestOptions, client?.provider); - const [providerResult, authResult] = await Promise.allSettled([providerPromise, authPromise]); - - const normalizedProviderData = normalizeProviderListData( - providerResult.status === "fulfilled" - ? providerResult.value?.data - : providerResult.reason?.response?.data || providerResult.reason?.cause?.response?.data, - ); - if (!normalizedProviderData) { - const providerError = providerResult.status === "rejected" ? 
providerResult.reason : null; - const recoveredSnapshot = extractRecoverySnapshot(providerError); - if (recoveredSnapshot) { - console.warn("[opencode-providers] recovering provider metadata from SDK error payload"); - return recoveredSnapshot; - } - if (providerError) { - if (hasIgnorableModelDiscoverySignal(providerError)) { - console.warn(`[opencode-providers] SDK discovery hit ignorable provider error: ${providerError.message}`); - } else { - console.warn(`[opencode-providers] SDK discovery failed: ${providerError.message}`); - } - } - return null; - } + // Fetch provider list + auth methods in parallel + const [providerRes, authRes] = await Promise.all([ + invokeProviderEndpoint(client?.provider?.list, requestOptions, client?.provider), + invokeProviderEndpoint(client?.provider?.auth, requestOptions, client?.provider), + ]); - if (providerResult.status === "rejected") { - console.warn("[opencode-providers] recovering provider metadata from SDK error payload"); - } + const normalizedProviderData = normalizeProviderListData(providerRes?.data); + if (!normalizedProviderData) return null; - const authMethods = authResult.status === "fulfilled" - ? 
(authResult.value?.data || {}) - : {}; + const authMethods = authRes?.data || {}; return buildSnapshotFromNormalizedProviderData(normalizedProviderData, authMethods); } catch (err) { console.warn(`[opencode-providers] SDK discovery failed: ${err.message}`); + + const fallbackProviderData = normalizeProviderListData( + err?.response?.data || err?.cause?.response?.data, + ); + if (fallbackProviderData) { + console.warn("[opencode-providers] recovering provider metadata from SDK error payload"); + return buildSnapshotFromNormalizedProviderData(fallbackProviderData, {}); + } + return null; } } @@ -459,46 +341,26 @@ async function execOpencode(args, execOpts = {}) { timeout: 30_000, maxBuffer: 10 * 1024 * 1024, encoding: "utf-8", - windowsHide: process.platform === "win32", ...execOpts, }; const escaped = args.map((a) => `"${a}"`).join(" "); - const commandText = `"${bin}" ${escaped}`; if (isWindows) { // Use exec() on Windows to properly handle .cmd wrappers - const result = await execAsync(commandText, baseOpts); - const normalized = typeof result === "string" + const result = await execAsync(`"${bin}" ${escaped}`, baseOpts); + return typeof result === "string" ? { stdout: result, stderr: "" } : { stdout: result.stdout || "", stderr: result.stderr || "" }; - if (!normalized.stdout.trim() && normalized.stderr.trim() && !hasIgnorableModelDiscoveryText(normalized.stdout, normalized.stderr)) { - const err = new Error(normalized.stderr.trim()); - err.stderr = normalized.stderr; - throw err; - } - return normalized; } try { const result = await execFileAsync(bin, args, baseOpts); - const normalized = typeof result === "string" + return typeof result === "string" ? 
{ stdout: result, stderr: "" } : { stdout: result.stdout || "", stderr: result.stderr || "" }; - if (!normalized.stdout.trim() && normalized.stderr.trim() && !hasIgnorableModelDiscoveryText(normalized.stdout, normalized.stderr)) { - const err = new Error(normalized.stderr.trim()); - err.stderr = normalized.stderr; - throw err; - } - return normalized; } catch { - const result = await execAsync(commandText, baseOpts); - const normalized = typeof result === "string" + const result = await execAsync(`"${bin}" ${escaped}`, baseOpts); + return typeof result === "string" ? { stdout: result, stderr: "" } : { stdout: result.stdout || "", stderr: result.stderr || "" }; - if (!normalized.stdout.trim() && normalized.stderr.trim() && !hasIgnorableModelDiscoveryText(normalized.stdout, normalized.stderr)) { - const err = new Error(normalized.stderr.trim()); - err.stderr = normalized.stderr; - throw err; - } - return normalized; } } @@ -673,9 +535,9 @@ async function discoverViaCLI() { // fall through to the original verbose failure below } - if (hasIgnorableModelDiscoverySignal(err)) { + if (isIgnorableModelDiscoveryError(err)) { console.warn( - `[opencode-providers] CLI model discovery hit ignorable provider error with no basic fallback data: ${err.message}` , + `[opencode-providers] skipping CLI model discovery after provider returned HTTP 400: ${err.message}` , ); return buildEmptySnapshot(); } @@ -729,9 +591,9 @@ async function discoverAllViaCLI() { // fall through to the original verbose failure below } - if (hasIgnorableModelDiscoverySignal(err)) { + if (isIgnorableModelDiscoveryError(err)) { console.warn( - `[opencode-providers] catalog discovery hit ignorable provider error with no basic fallback data: ${err.message}`, + `[opencode-providers] skipping catalog discovery after provider returned HTTP 400: ${err.message}`, ); return buildEmptySnapshot(); } @@ -763,31 +625,12 @@ export async function discoverProviders(opts = {}) { } // Try SDK first (requires running server) 
- let snapshot = null; - try { - snapshot = await discoverViaSDK(client); - } catch (err) { - const recoveredSnapshot = extractRecoverySnapshot(err); - if (recoveredSnapshot) { - snapshot = recoveredSnapshot; - } - if (!hasIgnorableModelDiscoverySignal(err)) { - throw err; - } - if (!recoveredSnapshot) { - console.warn( - `[opencode-providers] SDK discovery hit ignorable provider error; falling back to CLI: ${err.message}`, - ); - snapshot = null; - } - } + let snapshot = await discoverViaSDK(client); - const sdkDiscoveryFailed = snapshot == null; + const sdkSnapshotWasEmpty = isEmptySnapshot(snapshot); - // Fall back to CLI only when SDK discovery was unavailable or failed. - // A successful-but-empty SDK response is authoritative for older SDK - // compatibility probes and disconnected environments. - if (sdkDiscoveryFailed) { + // Fall back to CLI + if (!snapshot || sdkSnapshotWasEmpty) { snapshot = await discoverViaCLI(); } @@ -926,11 +769,3 @@ export function invalidateCache() { - - - - - - - - diff --git a/shell/opencode-shell.mjs b/shell/opencode-shell.mjs index 1841a32b0..7df9a5ac9 100644 --- a/shell/opencode-shell.mjs +++ b/shell/opencode-shell.mjs @@ -14,7 +14,6 @@ * Server: opencode binary on PATH (https://opencode.ai) */ -import "../infra/windows-hidden-child-processes.mjs"; import { mkdir, readFile, writeFile } from "node:fs/promises"; import { resolve } from "node:path"; import { fileURLToPath } from "node:url"; diff --git a/shell/pwsh-runtime.mjs b/shell/pwsh-runtime.mjs index 1906c6895..d0460836f 100644 --- a/shell/pwsh-runtime.mjs +++ b/shell/pwsh-runtime.mjs @@ -1,19 +1,12 @@ import { existsSync } from "node:fs"; import { dirname, resolve } from "node:path"; import { fileURLToPath } from "node:url"; -import { execSync as nodeExecSync } from "node:child_process"; +import { execSync } from "node:child_process"; const __dirname = dirname(fileURLToPath(import.meta.url)); const BUNDLED_PWSH_PATH = resolve(__dirname, "..", ".cache", "bosun", "pwsh", 
"pwsh"); const BUNDLED_PWSH_WINDOWS_PATH = `${BUNDLED_PWSH_PATH}.exe`; -function execSync(command, options = {}) { - return nodeExecSync(command, { - ...options, - windowsHide: options.windowsHide ?? (process.platform === "win32"), - }); -} - function commandExists(cmd) { try { execSync(`${process.platform === "win32" ? "where" : "which"} ${cmd}`, { diff --git a/site/AGENTS.md b/site/AGENTS.md index 1557c060b..565ee1544 100644 --- a/site/AGENTS.md +++ b/site/AGENTS.md @@ -14,7 +14,5 @@ Public website and docs pages (`bosun.engineer`) static assets. - Demo/smoke failures -> `site/ui/demo.html`, CI/test hooks. ## Validation -- Run `npm run syntax:check` after changing any file in `site/ui/`; the hook now validates browser import graphs, not just parse errors. -- Keep `site/ui/` helper/module copies in sync with their `ui/` counterparts when the hosted demo imports them directly. - Build docs: `npm run build:docs` - Full checks: `npm test && npm run build` diff --git a/site/docs/architecture.html b/site/docs/architecture.html index 70c3e8419..2e2176d3d 100644 --- a/site/docs/architecture.html +++ b/site/docs/architecture.html @@ -27,8 +27,8 @@ Bosun