diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 0000000..6fb5550 --- /dev/null +++ b/.dockerignore @@ -0,0 +1,91 @@ +### Dependencies and Caches +/node_modules/ +/.npm/ +/.pnpm-store/ +/.eslintcache +.yarn/* +!.yarn/patches +!.yarn/plugins +!.yarn/releases +!.yarn/sdks +!.yarn/versions +.yarnrc +.yarnrc.yml + +### Build and Runtime Artifacts +/dist/ +/build/ +/out/ +/tmp/ +*.tsbuildinfo +*.pid +*.pid.lock + +### Logs and Test Reports +/logs/ +*.log +npm-debug.log* +yarn-debug.log* +yarn-error.log* +pnpm-debug.log* +/coverage/ +/.nyc_output/ +junit.xml +coverage-final.json + +### Environment Variables +.env +.env.* +!.env.example +.envrc +/.direnv/ + +### IDE, Editor, and System Files +/.vscode/ +/.idea/ +/.fleet/ +/.history/ +*.iml +nodemon.json +.DS_Store +Thumbs.db +*~ +*.swp +*.swo + +### Auxiliary Tooling Artifacts +/__pycache__/ +*.py[cod] +/.pytest_cache/ +/.mypy_cache/ +/.venv/ +/venv/ +/.tox/ +*.out +*.o +*.obj +*.so +*.a +*.dll +*.exe + +### Project-Specific Ignores +pyproject.toml +.pre-commit-config.yaml +README.md +ROADMAP.md +LICENSE +CODE_OF_CONDUCT.md +CONTRIBUTING.md +BINHARIC.md +AGENT.md +/docs/ +/tests/ +vitest.config.ts +tsconfig.spec.json +*.png +*.jpg +*.jpeg +*.gif +*.ico +*.svg diff --git a/.github/workflows/lints.yml b/.github/workflows/lints.yml index 4ca6981..ed7d549 100644 --- a/.github/workflows/lints.yml +++ b/.github/workflows/lints.yml @@ -6,8 +6,6 @@ on: branches: - main push: - branches: - - main tags: - "v*" diff --git a/.github/workflows/publish_docker.yml b/.github/workflows/publish_docker.yml new file mode 100644 index 0000000..8fc6d6c --- /dev/null +++ b/.github/workflows/publish_docker.yml @@ -0,0 +1,77 @@ +name: Publish Docker Image to GHCR + +on: + workflow_dispatch: + push: + tags: + - "v*" + +permissions: + contents: read + packages: write + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +jobs: + call_tests: + uses: ./.github/workflows/tests.yml + + build-and-push: + runs-on: ubuntu-latest + needs: call_tests + permissions: + contents: read + packages: write + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Set up QEMU + uses: docker/setup-qemu-action@v3 + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + - name: Log in to GHCR + uses: docker/login-action@v3 + with: + registry: ghcr.io + username: ${{ github.repository_owner }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Extract Docker Metadata + id: meta + uses: docker/metadata-action@v5 + with: + images: | + ghcr.io/${{ github.repository }} + tags: | + type=ref,event=branch + type=semver,pattern={{version}} + type=semver,pattern={{major}}.{{minor}} + type=raw,value=latest,enable={{is_default_branch}} + + - name: Set Fallback Tag (latest) + id: fallback + run: | + if [ -z "${{ steps.meta.outputs.tags }}" ]; then + echo "tags=ghcr.io/${{ github.repository }}:latest" >> $GITHUB_OUTPUT + else + first_tag=$(echo "${{ steps.meta.outputs.tags }}" | head -n1) + echo "tags=${first_tag}" >> $GITHUB_OUTPUT + fi + + - name: Build and Push + uses: docker/build-push-action@v6 + with: + context: . 
+ file: ./Dockerfile + platforms: linux/amd64,linux/arm64 + push: true + tags: ${{ steps.fallback.outputs.tags }} + labels: ${{ steps.meta.outputs.labels }} + cache-from: type=gha + cache-to: type=gha,mode=max + provenance: false diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 0000000..b670c37 --- /dev/null +++ b/Dockerfile @@ -0,0 +1,28 @@ +# --- Build Stage --- +FROM node:20-alpine AS builder +WORKDIR /app +COPY package*.json ./ + +# Install dependencies, ignoring peer conflicts +RUN npm ci --legacy-peer-deps +COPY tsconfig.json ./ +COPY src ./src + +# Build the application +RUN npm run build + +# --- Runtime Stage --- +FROM node:20-alpine AS runtime +RUN apk add --no-cache bash +WORKDIR /app +ENV NODE_ENV=production +COPY package*.json ./ + +# Install production dependencies only +RUN npm ci --omit=dev --legacy-peer-deps + +# Copy built application from the build stage +COPY --from=builder /app/dist ./dist + +# Set the container's entrypoint +ENTRYPOINT ["node","dist/cli.js"] diff --git a/Makefile b/Makefile index 9bf43d6..475e43b 100644 --- a/Makefile +++ b/Makefile @@ -4,6 +4,9 @@ PACKAGE_MANAGER ?= npm NODE_MODULES_DIR ?= node_modules REMOVABLE_THINGS ?= .vitest-cache coverage site +DOCKER_IMAGE_NAME ?= binharic-cli +DOCKER_IMAGE_TAG ?= latest +DOCKER_CONTAINER_ARGS ?= # ============================================================================== # SETUP & CHECKS @@ -22,7 +25,8 @@ check-deps: # Declare all targets as phony (not files) .PHONY: help install check-deps test coverage lint lint-fix format typecheck build run clean reset setup-hooks \ - test-hooks npm-login npm-whoami pack pack-dry-run publish publish-dry-run version-patch version-minor version-major + test-hooks npm-login npm-whoami pack pack-dry-run publish publish-dry-run version-patch version-minor version-major \ + docker-image docker-run .DEFAULT_GOAL := help @@ -84,7 +88,7 @@ test-hooks: ## Test Git hooks on all files @pre-commit run --all-files --show-diff-on-failure # ============================================================================== -# PUBLISHING +# PUBLISHING TO NPM # ============================================================================== npm-login: ## Log in to npm registry @$(PACKAGE_MANAGER) login @@ -112,3 +116,15 @@ version-minor: ## Bump minor version (x.y.z -> x.(y+1).0) version-major: ## Bump major version ((x+1).0.0) @$(PACKAGE_MANAGER) version major + +# ============================================================================== +# DOCKER +# ============================================================================== + +docker-image: ## Build the Docker image + @echo "Building Docker image: $(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)" + @docker build -t $(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG) . 
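+# Illustrative local invocations (values are examples only; the --help argument is an
+# assumption about the CLI, not something defined in this repository):
+#   make docker-image DOCKER_IMAGE_TAG=v0.2.0
+#   make docker-run DOCKER_CONTAINER_ARGS="--help"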
+ +docker-run: ## Run the application in a Docker container + @echo "Running Docker image: $(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG) with args: $(DOCKER_CONTAINER_ARGS)" + @docker run --rm -it $(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG) $(DOCKER_CONTAINER_ARGS) diff --git a/README.md b/README.md index 55ac603..de00298 100644 --- a/README.md +++ b/README.md @@ -36,7 +36,8 @@ like the ability to analyze projects, run tests, find bugs, and perform code rev - Can use models from OpenAI, Google, Anthropic, and Ollama - Is fully customizable (like customizing system prompt) - Comes with a built-in retrieval-augmented generation (RAG) pipeline -- Comes with a large set of built-in tools (like reading and writing files); can use external tools via MCP +- Comes with a large set of built-in tools (like reading and writing files) +- Can use external tools via Model Context Protocol (MCP) - Comes with built-in workflows for standard software development tasks (like debugging and code review) See the [ROADMAP.md](ROADMAP.md) for the list of implemented and planned features. @@ -71,6 +72,11 @@ binharic [![asciicast](https://asciinema.org/a/vDae95b1lm20X7HGSlcVe3M6C.svg)](https://asciinema.org/a/vDae95b1lm20X7HGSlcVe3M6C) +> [!NOTE] +> The performance of a coding agent like Binharic, to a great extent, depends on the model it uses. +> So, it's recommended to use state-of-the-art models (like Claude Sonnet 4.5, GPT-5, and Gemini 2.5 Pro) for the best +> results. + --- #### Documentation diff --git a/ROADMAP.md b/ROADMAP.md index b5d9b03..2238872 100644 --- a/ROADMAP.md +++ b/ROADMAP.md @@ -103,7 +103,7 @@ It includes planned features, improvements, and their current implementation sta - [x] File search with @ mention - [x] Non-blocking UI during LLM responses - [x] Command syntax highlighting (partial match in yellow, full match in cyan) - - [x] Colored help menu items\*\* + - [x] Colored help menu items** - [x] Clean message display (no "Binharic:" prefix) - [x] Dynamic username from system (not hardcoded) - [x] Tool results hidden from UI (only failures shown) @@ -117,6 +117,7 @@ It includes planned features, improvements, and their current implementation sta - [x] Git branch display - [x] Responsive input field (non-blocking) - [x] Clear error messages for tool failures + - [x] Exit summary screen on quit (session ID, tool calls, success rate, timings, model usage) - [ ] Progress bars for long operations - [ ] Notification system - [ ] Undo/redo for file operations @@ -154,6 +155,7 @@ It includes planned features, improvements, and their current implementation sta - [x] Tool execution timeout protection (10 seconds for autofix) - [ ] Error recovery suggestions - [ ] Automatic error reporting (opt-in) + - [ ] Configurable stderr suppression via env flag (planned) - **Optimization** - [x] Efficient token counting - [x] Context window optimization @@ -169,7 +171,8 @@ It includes planned features, improvements, and their current implementation sta - [x] Provider availability checks - [x] Detailed tool execution logging - [x] Autofix attempt tracking - - [ ] Performance metrics collection + - [x] Basic session metrics rendered on exit (LLM API time, tool time, request counts) + - [ ] Persistent performance metrics collection - [ ] Usage analytics (tokens, costs) - [ ] Health checks and diagnostics @@ -205,6 +208,7 @@ It includes planned features, improvements, and their current implementation sta - [ ] Comprehensive user guide - [ ] Video tutorials - [ ] FAQ section + - [ ] Docker/Container usage guide 
(planned) - **Developer Documentation** - [x] Code of conduct - [x] Architecture documentation @@ -218,14 +222,18 @@ It includes planned features, improvements, and their current implementation sta - **Package Management** - [x] NPM package structure - [x] TypeScript compilation - - [ ] NPM registry publication - - [ ] Semantic versioning - - [ ] Release automation + - [x] NPM registry publication + - [x] Semantic versioning (via git tags) + - [x] Release automation (GitHub Actions: npm + GHCR) - **Installation Methods** - [ ] Homebrew formula (macOS) - [ ] Snap package (Linux) - [ ] Chocolatey package (Windows) - - [ ] Docker image + - [x] Docker image + - Published to GitHub Container Registry: `ghcr.io//` + - Multi-arch builds (linux/amd64, linux/arm64) via Buildx + - Makefile targets for local and CI builds/pushes + - Optimized build context via comprehensive `.dockerignore` - [ ] Standalone binary releases - **Cloud and Remote** - [ ] Remote execution support @@ -242,7 +250,7 @@ It includes planned features, improvements, and their current implementation sta - [x] Multi-step tool execution with automatic loop control - [x] Specialized agents with distinct personalities - [ ] onStepFinish callbacks for monitoring - - [ ] prepareStep callbacks for dynamic configuration\*\* + - [ ] prepareStep callbacks for dynamic configuration** - [ ] Multiple stopping conditions (step count, budget, errors, validation, completion) - [ ] Goal-oriented planning - [ ] Task decomposition diff --git a/docs/assets/diagrams/agentic_workflow.dot b/docs/assets/diagrams/agentic_workflow.dot new file mode 100644 index 0000000..fdfb4e4 --- /dev/null +++ b/docs/assets/diagrams/agentic_workflow.dot @@ -0,0 +1,79 @@ +digraph AgenticWorkflow { + // --- Graph Settings (Updated Style) --- + graph [ +rankdir=LR, +label="AI Agentic Workflow", +fontsize=22, +fontname="Helvetica-Bold,Arial-Bold,sans-serif", +fontcolor="#333333", +labelloc=t, +compound=true, +bgcolor="#F8F9FA", +splines=ortho, +nodesep=0.6, +ranksep=1.2 +]; + +// --- Default Node & Edge Styles (from example) --- + node [ +fontname="Helvetica,Arial,sans-serif", +shape=box, +style="filled,rounded", +color="lightblue", // Border color + fillcolor="white", // Default fill color + penwidth=2 +]; +edge [ +fontname="Helvetica,Arial,sans-serif", +color="black", +arrowhead=vee, +fontsize=10 +]; + +// --- Node Definitions (with new colors) --- + Start [ +shape=circle, +label="Start", +fontname="Helvetica-Bold,Arial-Bold,sans-serif" +]; +End [ +shape=doublecircle, +label="End", +fontname="Helvetica-Bold,Arial-Bold,sans-serif" +]; + +HumanLoop [ +label="User Feedback", +fillcolor="lightgreen" +]; +AIModel [ +label="AI Model", +fillcolor="lightpink" +]; + +// --- Agentic Loop Cluster (styled like example) --- + subgraph cluster_agentic_loop { +label = "Agentic Loop"; +style = "dashed"; +color = "lightgrey"; +fontname="Helvetica-Bold,Arial-Bold,sans-serif"; + +// Nodes inside the cluster get a yellow fill + node [fillcolor="lightyellow"]; + +Plan; +Execute; +Check; + +Plan -> Execute -> Check; +} + +// --- Layout and Workflow Connections --- + HumanLoop -> Execute -> AIModel [style=invis, minlen=1]; + +Start -> Plan [lhead=cluster_agentic_loop]; +Check -> End [ltail=cluster_agentic_loop]; + +HumanLoop -> Plan [lhead=cluster_agentic_loop, constraint=false, xlabel=" Feedback "]; +Execute -> AIModel [ltail=cluster_agentic_loop, constraint=false, xlabel="Uses "]; +} diff --git a/docs/assets/diagrams/agentic_workflow_v0.1.0.svg 
b/docs/assets/diagrams/agentic_workflow_v0.1.0.svg
new file mode 100644
index 0000000..57fcb4a
--- /dev/null
+++ b/docs/assets/diagrams/agentic_workflow_v0.1.0.svg
@@ -0,0 +1,355 @@
[355-line SVG omitted: Graphviz rendering of the "AI Agentic Workflow" diagram (cluster "Agentic Loop": Plan -> Execute -> Check; other nodes: Start, End, User Feedback, AI Model; edge labels "Feedback" and "Uses").]
diff --git a/docs/assets/diagrams/agentic_workflow_v0.2.0.svg b/docs/assets/diagrams/agentic_workflow_v0.2.0.svg
new file mode 100644
index 0000000..75d4ba6
--- /dev/null
+++ b/docs/assets/diagrams/agentic_workflow_v0.2.0.svg
@@ -0,0 +1,344 @@
[344-line SVG omitted: the same "AI Agentic Workflow" diagram regenerated for v0.2.0.]
diff --git a/docs/assets/diagrams/make_figures.sh b/docs/assets/diagrams/make_figures.sh
new file mode 100644
index 0000000..9f679a2
--- /dev/null
+++ b/docs/assets/diagrams/make_figures.sh
@@ -0,0 +1,9 @@
+#!/bin/bash
+
+# You need to have Graphviz installed to run this script
+# On Debian-based systems, you can install it using: sudo apt-get install graphviz
+
+# Make figures from .dot files
+for f in *.dot; do
+    dot -Tsvg $f -o ${f%.dot}.svg
+done
diff --git a/src/agent/core/outputStyles.ts b/src/agent/core/outputStyles.ts
new file mode 100644
index 0000000..3102900
--- /dev/null
+++ b/src/agent/core/outputStyles.ts
@@ -0,0 +1,80 @@
+import type { Config } from "@/config.js";
+
+export type OutputStyle = "default" | "explanatory" | "learning" | "concise" | "verbose";
+
+export interface OutputStyleConfig {
+    name: OutputStyle;
+    systemPromptAddition: string;
+    description: string;
+}
+
+export const OUTPUT_STYLES: Record<OutputStyle, OutputStyleConfig> = {
+    default: {
+        name: "default",
+        systemPromptAddition: "",
+        description: "Standard interaction mode",
+    },
+    explanatory: {
+        name: "explanatory",
+        systemPromptAddition: `
+You should be highly educational in your responses.
When making implementation choices: +- Explain WHY you chose a particular approach +- Discuss alternative solutions you considered +- Point out trade-offs in your decisions +- Reference best practices and design patterns +- Help the user understand the reasoning behind your actions + +Think of yourself as a mentor teaching through action.`, + description: "Educational mode - explains implementation choices and reasoning", + }, + learning: { + name: "learning", + systemPromptAddition: ` +You should work collaboratively with the user to help them learn: +- Break down complex tasks into smaller, manageable steps +- Ask the user to implement simpler parts themselves while you handle complex ones +- Provide hints and guidance rather than complete solutions when appropriate +- Explain concepts as you go +- Verify the user's understanding before proceeding + +The goal is active learning - keep the user engaged and coding alongside you.`, + description: "Collaborative learning mode - guides user to implement parts themselves", + }, + concise: { + name: "concise", + systemPromptAddition: ` +Be extremely concise and to-the-point: +- Minimize explanations unless asked +- Focus on getting work done efficiently +- Only mention critical information +- Use brief status updates`, + description: "Minimal output - focuses on getting work done quickly", + }, + verbose: { + name: "verbose", + systemPromptAddition: ` +Provide detailed, comprehensive responses: +- Explain every step thoroughly +- Include all relevant context and background +- Discuss edge cases and potential issues +- Provide extensive documentation in comments +- Share detailed reasoning for all decisions`, + description: "Detailed output - comprehensive explanations and documentation", + }, +}; + +export function getOutputStylePrompt(style: OutputStyle): string { + return OUTPUT_STYLES[style].systemPromptAddition; +} + +export function getOutputStyle(config: Config): OutputStyle { + const style = (config as any).outputStyle; + if (style && style in OUTPUT_STYLES) { + return style as OutputStyle; + } + return "default"; +} + +export function listOutputStyles(): OutputStyleConfig[] { + return Object.values(OUTPUT_STYLES); +} diff --git a/src/agent/core/permissionsManager.ts b/src/agent/core/permissionsManager.ts new file mode 100644 index 0000000..a780f00 --- /dev/null +++ b/src/agent/core/permissionsManager.ts @@ -0,0 +1,167 @@ +import fs from "fs/promises"; +import path from "path"; +import os from "os"; +import logger from "@/logger.js"; + +export interface PermissionRule { + pattern: string; + allow: boolean; + scope?: "session" | "project" | "global"; +} + +export interface PermissionsConfig { + allowedCommands: string[]; + blockedCommands: string[]; + allowedPaths: string[]; + blockedPaths: string[]; + rules: PermissionRule[]; + autoApprove?: { + readOperations?: boolean; + safeCommands?: boolean; + }; +} + +const SAFE_READ_COMMANDS = [ + "ls", + "cat", + "pwd", + "echo", + "which", + "env", + "git status", + "git log", + "git diff", + "npm list", +]; + +const DANGEROUS_COMMANDS = [ + "rm -rf", + "dd", + "mkfs", + "format", + "> /dev/", + "chmod -R 777", + "chown -R", +]; + +export class PermissionsManager { + private config: PermissionsConfig; + private sessionAllowed: Set = new Set(); + private configPath: string; + + constructor(projectRoot?: string) { + this.config = { + allowedCommands: [], + blockedCommands: [], + allowedPaths: [], + blockedPaths: [], + rules: [], + autoApprove: { + readOperations: false, + safeCommands: false, + }, 
+ }; + + this.configPath = projectRoot + ? path.join(projectRoot, ".binharic", "permissions.json") + : path.join(os.homedir(), ".config", "binharic", "permissions.json"); + } + + async load(): Promise { + try { + const content = await fs.readFile(this.configPath, "utf-8"); + this.config = JSON.parse(content); + logger.info(`Loaded permissions from ${this.configPath}`); + } catch (error) { + logger.debug("No permissions file found, using defaults"); + } + } + + async save(): Promise { + try { + await fs.mkdir(path.dirname(this.configPath), { recursive: true }); + await fs.writeFile(this.configPath, JSON.stringify(this.config, null, 2)); + logger.info(`Saved permissions to ${this.configPath}`); + } catch (error) { + logger.error("Failed to save permissions", error); + } + } + + checkCommand(command: string): "allow" | "deny" | "prompt" { + if (this.sessionAllowed.has(command)) { + return "allow"; + } + + if (DANGEROUS_COMMANDS.some((dangerous) => command.includes(dangerous))) { + return "prompt"; + } + + if ( + this.config.autoApprove?.safeCommands && + SAFE_READ_COMMANDS.some((safe) => command.startsWith(safe)) + ) { + return "allow"; + } + + if (this.config.allowedCommands.some((pattern) => this.matchesPattern(command, pattern))) { + return "allow"; + } + + if (this.config.blockedCommands.some((pattern) => this.matchesPattern(command, pattern))) { + return "deny"; + } + + for (const rule of this.config.rules) { + if (this.matchesPattern(command, rule.pattern)) { + return rule.allow ? "allow" : "deny"; + } + } + + return "prompt"; + } + + checkPath( + filePath: string, + operation: "read" | "write" | "delete", + ): "allow" | "deny" | "prompt" { + const normalizedPath = path.normalize(filePath); + + if (operation === "read" && this.config.autoApprove?.readOperations) { + return "allow"; + } + + if (this.config.allowedPaths.some((allowed) => normalizedPath.startsWith(allowed))) { + return "allow"; + } + + if (this.config.blockedPaths.some((blocked) => normalizedPath.startsWith(blocked))) { + return "deny"; + } + + const sensitivePatterns = ["/etc/", "/var/", "/sys/", "/proc/", ".ssh/", ".env"]; + if (operation === "write" || operation === "delete") { + if (sensitivePatterns.some((pattern) => normalizedPath.includes(pattern))) { + return "prompt"; + } + } + + return "prompt"; + } + + allowForSession(command: string): void { + this.sessionAllowed.add(command); + } + + async allowPermanently( + command: string, + scope: "project" | "global" = "project", + ): Promise { + this.config.allowedCommands.push(command); + await this.save(); + } + + private matchesPattern(value: string, pattern: string): boolean { + const regex = new RegExp(pattern.replace(/\*/g, ".*")); + return regex.test(value); + } +} diff --git a/src/agent/core/state.ts b/src/agent/core/state.ts index a4247ac..98ea02f 100644 --- a/src/agent/core/state.ts +++ b/src/agent/core/state.ts @@ -13,6 +13,7 @@ import { HistoryItem, ToolRequestItem } from "../context/history.js"; import type { ModelMessage } from "ai"; import { applyContextWindow } from "../context/contextWindow.js"; import type { CheckpointRequest } from "./checkpoints.js"; +import { createStreamingTextFilter, finalizeFilteredText } from "../llm/textFilters.js"; const SAFE_AUTO_TOOLS = new Set([ "read_file", @@ -68,6 +69,17 @@ function validateModelApiKey(modelConfig: ModelConfig, config: Config): void { } } +type SessionMetrics = { + sessionId: string; + startedAt: number; + llmRequests: number; + llmApiTimeMs: number; + toolCallsSuccess: number; + toolCallsFailure: number; 
+ toolTimeMs: number; + modelUsage: Record; +}; + type AppState = { history: HistoryItem[]; commandHistory: string[]; @@ -88,6 +100,9 @@ type AppState = { pendingToolRequest: ToolRequestItem | null; pendingCheckpoint: CheckpointRequest | null; contextFiles: string[]; + // New: session metrics and exit summary flag + metrics: SessionMetrics; + showExitSummary: boolean; }; type AppActions = { @@ -113,6 +128,8 @@ type AppActions = { setModel: (modelName: string) => void; addContextFile: (path: string) => void; clearContextFiles: () => void; + // New: exit flow + beginExit: () => void; }; }; @@ -139,6 +156,17 @@ export const useStore = create((set, get) => ({ pendingToolRequest: null, pendingCheckpoint: null, contextFiles: [], + metrics: { + sessionId: randomUUID(), + startedAt: Date.now(), + llmRequests: 0, + llmApiTimeMs: 0, + toolCallsSuccess: 0, + toolCallsFailure: 0, + toolTimeMs: 0, + modelUsage: {}, + }, + showExitSummary: false, actions: { loadInitialConfig: async () => { logger.info("Loading initial configuration."); @@ -306,6 +334,11 @@ export const useStore = create((set, get) => ({ }, clearContextFiles: () => set({ contextFiles: [] }), + beginExit: () => { + logger.info("Exit requested - showing summary"); + set({ showExitSummary: true }); + }, + startAgent: async (input: string) => { if (get().status !== "idle") { logger.warn("Agent already running, ignoring new start request"); @@ -338,12 +371,10 @@ export const useStore = create((set, get) => ({ const currentStatus = get().status; if (currentStatus === "responding" || currentStatus === "executing-tool") { - set({ status: "idle" }); - shouldStopAgent = true; - isAgentRunning = false; - agentLockTimestamp = 0; - - logger.info("Agent stop requested - will complete when streaming ends"); + set({ status: "interrupted" }); + logger.info( + "Agent stop requested - will complete when streaming or execution ends", + ); } }, @@ -369,6 +400,7 @@ export const useStore = create((set, get) => ({ error: "Execution cancelled by user", } as HistoryItem; } + const t0 = Date.now(); try { const output = await runTool( { @@ -377,6 +409,17 @@ export const useStore = create((set, get) => ({ }, config, ); + const dt = Date.now() - t0; + { + const current = get(); + set({ + metrics: { + ...current.metrics, + toolCallsSuccess: current.metrics.toolCallsSuccess + 1, + toolTimeMs: current.metrics.toolTimeMs + dt, + }, + }); + } return { id: randomUUID(), role: "tool-result", @@ -385,6 +428,17 @@ export const useStore = create((set, get) => ({ output, } as HistoryItem; } catch (error) { + const dt2 = Date.now() - t0; + { + const current2 = get(); + set({ + metrics: { + ...current2.metrics, + toolCallsFailure: current2.metrics.toolCallsFailure + 1, + toolTimeMs: current2.metrics.toolTimeMs + dt2, + }, + }); + } return { id: randomUUID(), role: "tool-failure", @@ -498,6 +552,10 @@ async function _runAgentLogicInternal( const startHistoryLength = get().history.length; + // Track API timing per request + let apiStart = 0; + let apiCounted = false; + try { const { history, config } = get(); if (!config) throw new FatalError("Configuration not loaded."); @@ -585,10 +643,22 @@ async function _runAgentLogicInternal( throw new FatalError(`Model ${config.defaultModel} not found in configuration.`); } + // Record model usage and increment request count + { + const current = get(); + const mu = { ...current.metrics.modelUsage } as AppState["metrics"]["modelUsage"]; + const key = modelConfig.name; + mu[key] = mu[key] + ? 
{ ...mu[key], requests: mu[key].requests + 1 } + : { provider: modelConfig.provider, modelId: modelConfig.modelId, requests: 1 }; + set({ metrics: { ...current.metrics, llmRequests: current.metrics.llmRequests + 1, modelUsage: mu } }); + } + sdkCompliantHistory = applyContextWindow(sdkCompliantHistory, modelConfig); const systemPrompt = await generateSystemPrompt(config); + apiStart = Date.now(); const streamResult = await streamAssistantResponse( sdkCompliantHistory, config, @@ -611,6 +681,7 @@ async function _runAgentLogicInternal( }; resetStreamTimeout(); + const textFilter = createStreamingTextFilter(); try { for await (const part of textStream) { @@ -635,6 +706,14 @@ async function _runAgentLogicInternal( }, ], }); + + // Count API time until interruption + if (apiStart && !apiCounted) { + const current = get(); + const dt = Date.now() - apiStart; + set({ metrics: { ...current.metrics, llmApiTimeMs: current.metrics.llmApiTimeMs + dt } }); + apiCounted = true; + } return; } @@ -648,14 +727,34 @@ async function _runAgentLogicInternal( }; set({ history: [...get().history, assistantMessage] }); } - (assistantMessage.content as string) += part; - set({ history: [...get().history] }); + + const filteredPart = textFilter(part); + if (filteredPart) { + (assistantMessage.content as string) += filteredPart; + set({ history: [...get().history] }); + } } } finally { if (activeStreamTimeout) { clearTimeout(activeStreamTimeout); activeStreamTimeout = null; } + + if (assistantMessage && typeof assistantMessage.content === "string") { + const flushedContent = textFilter.flush(); + if (flushedContent) { + assistantMessage.content += flushedContent; + } + assistantMessage.content = finalizeFilteredText(assistantMessage.content); + set({ history: [...get().history] }); + } + // After streaming completes, add API time once + if (apiStart && !apiCounted) { + const current = get(); + const dt = Date.now() - apiStart; + set({ metrics: { ...current.metrics, llmApiTimeMs: current.metrics.llmApiTimeMs + dt } }); + apiCounted = true; + } } if (shouldStopAgent) { @@ -687,14 +786,12 @@ async function _runAgentLogicInternal( args: ("args" in call && call.args) || ("input" in call && call.input) || {}, })); + // Rewrite create -> edit when file exists to avoid error and meet test expectations for (const call of validToolCalls) { if (call.toolName === "create") { - const p = (call as { args: Record }).args["path"] as - | string - | undefined; - const content = (call as { args: Record }).args["content"] as - | string - | undefined; + const args = (call as { args: Record }).args || {}; + const p = (args["path"] as string) || (args["filePath"] as string) || undefined; + const content = (args["content"] as string) || undefined; if (p && fsSync.existsSync(path.resolve(p)) && typeof content === "string") { (call as { toolName: string }).toolName = "edit"; (call as { args: Record }).args = { @@ -712,6 +809,7 @@ async function _runAgentLogicInternal( for (const toolCall of validToolCalls) { if (SAFE_AUTO_TOOLS.has(toolCall.toolName)) { autoExecutedCalls.push(toolCall); + const t0 = Date.now(); try { const output = await runTool( { @@ -720,6 +818,17 @@ async function _runAgentLogicInternal( }, config, ); + const dt3 = Date.now() - t0; + { + const current3 = get(); + set({ + metrics: { + ...current3.metrics, + toolCallsSuccess: current3.metrics.toolCallsSuccess + 1, + toolTimeMs: current3.metrics.toolTimeMs + dt3, + }, + }); + } autoResults.push({ id: randomUUID(), role: "tool-result", @@ -728,15 +837,23 @@ async function 
_runAgentLogicInternal( output, }); } catch (error) { + const dt4 = Date.now() - t0; + { + const current4 = get(); + set({ + metrics: { + ...current4.metrics, + toolCallsFailure: current4.metrics.toolCallsFailure + 1, + toolTimeMs: current4.metrics.toolTimeMs + dt4, + }, + }); + } autoResults.push({ id: randomUUID(), role: "tool-failure", toolCallId: toolCall.toolCallId, toolName: toolCall.toolName, - error: - error instanceof Error - ? error.message - : "An unknown error occurred", + error: error instanceof Error ? error.message : "An unknown error occurred", }); } } else { diff --git a/src/agent/core/systemPrompt.ts b/src/agent/core/systemPrompt.ts index bc1f611..2f15b56 100644 --- a/src/agent/core/systemPrompt.ts +++ b/src/agent/core/systemPrompt.ts @@ -5,6 +5,7 @@ import path from "path"; import os from "os"; import { osLocale } from "os-locale"; import logger from "@/logger.js"; +import { getOutputStyle, getOutputStylePrompt } from "./outputStyles.js"; async function getUserLocale(): Promise { try { @@ -89,25 +90,11 @@ export async function generateSystemPrompt(config: Config): Promise { " - After creating files, verify they exist with correct content\n" + " - State explicitly what you verified and the outcome\n" + "3. **Progressive Disclosure:** Break complex tasks into clear steps. Execute one step at a time, explain the result, then proceed.\n" + - "4. **Workflow Selection:** For complex multi-step tasks, consider using the execute_workflow tool:\n" + - " - Code reviews → execute_workflow({ workflowType: 'code-review' })\n" + - " - Security audits → execute_workflow({ workflowType: 'security-audit' })\n" + - " - Bug fixes → execute_workflow({ workflowType: 'fix-bug' })\n" + - " - Adding features → execute_workflow({ workflowType: 'orchestrated-implementation' })\n" + - " - Refactoring → execute_workflow({ workflowType: 'refactoring-feedback' })\n" + - " - Documentation → execute_workflow({ workflowType: 'adaptive-docs' })\n" + - " Workflows provide structured guidance and ensure systematic completion of complex tasks.\n" + + "4. **Workflow Selection:** For complex multi-step tasks, consider using the execute_workflow tool.\n" + "5. **Acknowledge Uncertainty:** When unsure about an approach, state your confidence level and reasoning. Propose alternatives when appropriate.\n" + "6. **Tool Usage Philosophy:** Use tools purposefully. Read before writing. Understand before modifying. Verify after changing.\n" + - "7. **Error Recovery:** When encountering errors:\n" + - " - Explain what went wrong and why\n" + - " - Propose an alternative approach\n" + - " - Learn from the error to avoid repeating it\n" + - " - Don't retry the exact same action that failed\n" + - "8. **Task Completion:** When you've accomplished the goal:\n" + - " - Summarize what was done\n" + - " - Verify the final state\n" + - " - State explicitly that the task is complete", + "7. **Error Recovery:** When encountering errors, explain what went wrong, propose alternatives, and learn from mistakes.\n" + + "8. **Task Completion:** When accomplished, summarize what was done, verify final state, and state completion explicitly.", ]; if (instructionContent) { @@ -132,5 +119,10 @@ export async function generateSystemPrompt(config: Config): Promise { "\n```", ); - return promptParts.join("\n\n"); + const basePrompt = promptParts.join("\n\n"); + + const outputStyle = getOutputStyle(config); + const styleAddition = getOutputStylePrompt(outputStyle); + + return `${basePrompt}${styleAddition ? 
"\n\n" + styleAddition : ""}`; } diff --git a/src/agent/errors/stderrSuppression.ts b/src/agent/errors/stderrSuppression.ts new file mode 100644 index 0000000..f9269f4 --- /dev/null +++ b/src/agent/errors/stderrSuppression.ts @@ -0,0 +1,50 @@ +import type logger from "@/logger.js"; + +let originalWrite: typeof process.stderr.write | null = null; + +function isSuppressionEnabledFromEnv(): boolean { + const v = process.env.BINHARIC_SUPPRESS_STDERR; + if (v === undefined) return true; + const val = String(v).toLowerCase(); + return !(val === "false" || val === "0" || val === "no" || val === "off"); +} + +export function initStderrSuppression(log: typeof logger): void { + if (originalWrite) return; + const enabled = isSuppressionEnabledFromEnv(); + if (!enabled) return; + + originalWrite = process.stderr.write.bind(process.stderr); + + process.stderr.write = function (chunk: unknown, encoding?: unknown, callback?: unknown) { + const chunkStr = chunk?.toString() || ""; + const shouldSuppress = + chunkStr.includes("APICallError") || + chunkStr.includes("AI_APICallError") || + chunkStr.includes("at file://") || + chunkStr.includes("at async") || + chunkStr.includes("at process.processTicksAndRejections") || + (chunkStr.includes("{") && chunkStr.includes("statusCode")) || + chunkStr.includes("requestBodyValues") || + chunkStr.includes("responseHeaders") || + chunkStr.includes("responseBody") || + chunkStr.includes("[Symbol(vercel.ai.error)]"); + + if (shouldSuppress) { + log.error("Suppressed stderr output:", { message: chunkStr.trim() }); + if (typeof callback === "function") { + (callback as (err?: Error | null) => void)(); + } + return true as any; + } + + return (originalWrite as any)(chunk as string, encoding as any, callback as any); + } as typeof process.stderr.write; +} + +export function restoreStderrWrite(): void { + if (originalWrite) { + process.stderr.write = originalWrite; + originalWrite = null; + } +} diff --git a/src/agent/llm/textFilters.ts b/src/agent/llm/textFilters.ts new file mode 100644 index 0000000..4945eb9 --- /dev/null +++ b/src/agent/llm/textFilters.ts @@ -0,0 +1,69 @@ +export function filterReasoningTags(text: string): string { + return text.replace(/[\s\S]*?<\/think>/gi, "").trim(); +} + +export function createStreamingTextFilter() { + let buffer = ""; + let insideThinkTag = false; + + const filterFunc = function filterChunk(chunk: string): string { + buffer += chunk; + + const thinkStartRegex = //gi; + const thinkEndRegex = /<\/think>/gi; + + let result = ""; + let lastIndex = 0; + + while (lastIndex < buffer.length) { + if (!insideThinkTag) { + const startMatch = thinkStartRegex.exec(buffer.slice(lastIndex)); + + if (startMatch) { + result += buffer.slice(lastIndex, lastIndex + startMatch.index); + insideThinkTag = true; + lastIndex += startMatch.index + startMatch[0].length; + thinkStartRegex.lastIndex = 0; + } else { + const safeLength = buffer.length - 7; + if (safeLength > lastIndex) { + result += buffer.slice(lastIndex, safeLength); + buffer = buffer.slice(safeLength); + lastIndex = 0; + } + break; + } + } else { + const endMatch = thinkEndRegex.exec(buffer.slice(lastIndex)); + + if (endMatch) { + insideThinkTag = false; + lastIndex += endMatch.index + endMatch[0].length; + thinkEndRegex.lastIndex = 0; + } else { + buffer = buffer.slice(lastIndex); + lastIndex = 0; + break; + } + } + } + + if (lastIndex > 0) { + buffer = buffer.slice(lastIndex); + } + + return result; + }; + + filterFunc.flush = function (): string { + const remaining = buffer; + buffer = ""; + 
return remaining; + }; + + return filterFunc; +} + +export function finalizeFilteredText(text: string): string { + return text.trim(); +} diff --git a/src/agent/tools/definitions/terminalSession.ts b/src/agent/tools/definitions/terminalSession.ts index 3a42612..2233ce3 100644 --- a/src/agent/tools/definitions/terminalSession.ts +++ b/src/agent/tools/definitions/terminalSession.ts @@ -1,12 +1,8 @@ -// src/agent/tools/definitions/terminal_session.ts -// Persistent terminal session management - import { z } from "zod"; import { tool } from "ai"; import { type ChildProcess, spawn } from "child_process"; import { ToolError } from "../../errors/index.js"; -// Global session storage const sessions = new Map< string, { @@ -20,14 +16,12 @@ const sessions = new Map< let sessionCounter = 0; -// Resource limits const MAX_SESSIONS = 10; const MAX_COMMAND_LENGTH = 10000; -const MAX_OUTPUT_SIZE = 1024 * 1024; // 1MB -const BACKGROUND_TIMEOUT_MS = 300000; // 5 minutes -const MAX_OUTPUT_LINES = 1000; // Max lines in output buffer +const MAX_OUTPUT_SIZE = 1024 * 1024; +const BACKGROUND_TIMEOUT_MS = 300000; +const MAX_OUTPUT_LINES = 1000; -// Cleanup function to prevent memory leaks function cleanupSession(sessionId: string) { const session = sessions.get(sessionId); if (session) { @@ -37,7 +31,6 @@ function cleanupSession(sessionId: string) { if (!session.process.killed) { session.process.kill(); } - // Remove all event listeners to prevent memory leaks session.process.stdout?.removeAllListeners(); session.process.stderr?.removeAllListeners(); session.process.removeAllListeners(); @@ -70,12 +63,10 @@ export const runInTerminalTool = tool({ }) .strict(), execute: async ({ command, explanation, isBackground = false }) => { - // 1. Empty command detection if (!command || command.trim().length === 0) { throw new ToolError("Cannot execute empty command. Please provide a valid command."); } - // 2. Command length limits if (command.length > MAX_COMMAND_LENGTH) { throw new ToolError( `Command exceeds maximum length of ${MAX_COMMAND_LENGTH} characters. ` + @@ -83,7 +74,6 @@ export const runInTerminalTool = tool({ ); } - // 3. Session limits if (isBackground && sessions.size >= MAX_SESSIONS) { throw new ToolError( `Maximum of ${MAX_SESSIONS} concurrent terminal sessions reached. ` + @@ -91,7 +81,6 @@ export const runInTerminalTool = tool({ ); } - // 4. Check for known interactive commands that won't work const interactiveCommands = [ "htop", "top", @@ -112,7 +101,6 @@ export const runInTerminalTool = tool({ ); } - // 5. Dangerous command detection const dangerousPatterns = [ { pattern: /rm\s+(-[rf]+\s+)*\//i, @@ -156,7 +144,7 @@ export const runInTerminalTool = tool({ let outputSize = 0; let hasResolved = false; - const timeout = isBackground ? undefined : 30000; // 30 second timeout for foreground commands + const timeout = isBackground ? 
undefined : 30000; const child = spawn(command, { cwd: process.cwd(), @@ -169,7 +157,6 @@ export const runInTerminalTool = tool({ const text = data.toString(); outputSize += text.length; - // Output size limit enforcement if (outputSize > MAX_OUTPUT_SIZE) { if (!hasResolved) { hasResolved = true; @@ -196,14 +183,12 @@ export const runInTerminalTool = tool({ child.stderr?.on("data", handleOutput); if (isBackground) { - // Background session timeout - auto-cleanup after 5 minutes const backgroundTimeout = setTimeout(() => { if (sessions.has(sessionId)) { cleanupSession(sessionId); } }, BACKGROUND_TIMEOUT_MS); - // Store session for later retrieval sessions.set(sessionId, { process: child, output, @@ -214,14 +199,12 @@ export const runInTerminalTool = tool({ if (!hasResolved) { hasResolved = true; - // Return immediately with session ID resolve( `Background process started with session ID: ${sessionId}\n${explanation}\n` + `Use get_terminal_output to check its status. Process will auto-terminate after 5 minutes.`, ); } } else { - // Wait for completion child.on("close", (code) => { if (!hasResolved) { hasResolved = true; @@ -263,7 +246,6 @@ export const getTerminalOutputTool = tool({ }) .strict(), execute: async ({ id }) => { - // Session ID validation if (!id || typeof id !== "string") { throw new ToolError("Invalid session ID. Must be a non-empty string."); } diff --git a/src/agent/workflows/autofix.ts b/src/agent/workflows/autofix.ts index 4982e04..d221409 100644 --- a/src/agent/workflows/autofix.ts +++ b/src/agent/workflows/autofix.ts @@ -87,16 +87,16 @@ export async function autofixEdit( const fixer = getFixerClient(); if (!fixer) return null; + const TIMEOUT_MS = 10000; + const TIMEOUT_SENTINEL = Symbol("autofix-timeout"); + + let timeoutId: NodeJS.Timeout | null = null; + try { logger.info("Attempting to autofix edit search string..."); - let timeoutId: NodeJS.Timeout | null = null; - - const timeoutPromise = new Promise((_, reject) => { - timeoutId = setTimeout( - () => reject(new Error("Autofix timeout after 10 seconds")), - 10000, - ); + const timeoutPromise = new Promise((resolve) => { + timeoutId = setTimeout(() => resolve(TIMEOUT_SENTINEL), TIMEOUT_MS); }); const autofixPromise = (async () => { @@ -111,28 +111,25 @@ export async function autofixEdit( logger.error("Error during edit autofix streaming:", error); }, }); - return await result.object; })(); - const result = await Promise.race([autofixPromise, timeoutPromise]); - - if (timeoutId) { - clearTimeout(timeoutId); - } + const raced = (await Promise.race([autofixPromise, timeoutPromise])) as + | z.infer + | typeof TIMEOUT_SENTINEL; - if (!result) { + if (raced === TIMEOUT_SENTINEL) { logger.warn("Autofix timed out"); return null; } - if (result.success && result.correctedSearch) { - if (fileContent.includes(result.correctedSearch)) { + if (raced.success && raced.correctedSearch) { + if (fileContent.includes(raced.correctedSearch)) { logger.info("Autofix for edit successful.", { - confidence: result.confidence, - explanation: result.explanation, + confidence: raced.confidence, + explanation: raced.explanation, }); - return result.correctedSearch; + return raced.correctedSearch; } logger.warn("Autofix for edit returned a search string not present in the file."); } @@ -140,5 +137,7 @@ export async function autofixEdit( } catch (e) { logger.error("Edit autofixing failed.", e); return null; + } finally { + if (timeoutId) clearTimeout(timeoutId); } } diff --git a/src/cli.ts b/src/cli.ts index 60cde8b..5a8d366 100644 --- 
a/src/cli.ts +++ b/src/cli.ts @@ -6,38 +6,9 @@ import App from "./ui/App.js"; import logger from "./logger.js"; import { cleanupAllSessions } from "./agent/tools/definitions/terminalSession.js"; import { useStore } from "./agent/core/state.js"; +import { initStderrSuppression } from "./agent/errors/stderrSuppression.js"; -const originalStderrWrite = process.stderr.write.bind(process.stderr); - -process.stderr.write = function (chunk: unknown, encoding?: unknown, callback?: unknown): boolean { - const chunkStr = chunk?.toString() || ""; - - const shouldSuppress = - chunkStr.includes("APICallError") || - chunkStr.includes("AI_APICallError") || - chunkStr.includes("at file://") || - chunkStr.includes("at async") || - chunkStr.includes("at process.processTicksAndRejections") || - (chunkStr.includes("{") && chunkStr.includes("statusCode")) || - chunkStr.includes("requestBodyValues") || - chunkStr.includes("responseHeaders") || - chunkStr.includes("responseBody") || - chunkStr.includes("[Symbol(vercel.ai.error)]"); - - if (shouldSuppress) { - logger.error("Suppressed stderr output:", { message: chunkStr.trim() }); - if (typeof callback === "function") { - callback(); - } - return true; - } - - return originalStderrWrite( - chunk as string, - encoding as BufferEncoding, - callback as (error?: Error | null) => void, - ); -} as typeof process.stderr.write; +initStderrSuppression(logger); process.removeAllListeners("unhandledRejection"); process.removeAllListeners("uncaughtException"); @@ -141,7 +112,9 @@ const handleSIGINT = () => { const exitCallback = getExitCallback(); if (exitCallback) { + // Let UI handle summary and exit exitCallback(); + return; } unmount(); @@ -165,6 +138,13 @@ process.on("SIGTERM", () => { } cleanupAllSessions(); + + const exitCallback = getExitCallback(); + if (exitCallback) { + exitCallback(); + return; + } + unmount(); process.exit(0); }); diff --git a/src/ui/App.tsx b/src/ui/App.tsx index d097887..63cdf45 100644 --- a/src/ui/App.tsx +++ b/src/ui/App.tsx @@ -10,6 +10,7 @@ import { HelpMenu } from "./HelpMenu.js"; import { ContextSummaryDisplay } from "./ContextSummaryDisplay.js"; import { ToolConfirmation } from "./ToolConfirmation.js"; import { CheckpointConfirmation } from "./CheckpointConfirmation.js"; +import ExitSummary from "./ExitSummary.js"; declare global { // augment global object with optional exit callback holder @@ -18,14 +19,17 @@ declare global { export default function App() { const { exit } = useApp(); - const { loadInitialConfig, helpMenuOpen, status, clearError } = useStore( - useShallow((s) => ({ - loadInitialConfig: s.actions.loadInitialConfig, - helpMenuOpen: s.helpMenuOpen, - status: s.status, - clearError: s.actions.clearError, - })), - ); + const { loadInitialConfig, helpMenuOpen, status, clearError, showExitSummary, beginExit } = + useStore( + useShallow((s) => ({ + loadInitialConfig: s.actions.loadInitialConfig, + helpMenuOpen: s.helpMenuOpen, + status: s.status, + clearError: s.actions.clearError, + showExitSummary: s.showExitSummary, + beginExit: s.actions.beginExit, + })), + ); useEffect(() => { loadInitialConfig(); @@ -33,10 +37,17 @@ export default function App() { const g = globalThis as typeof globalThis & { __binharic_exit_callback?: () => void; }; - if (typeof g.__binharic_exit_callback === "undefined") { - g.__binharic_exit_callback = exit; - } - }, [loadInitialConfig, exit]); + // Install a custom exit callback that shows summary before exiting + g.__binharic_exit_callback = () => { + beginExit(); + // Give Ink time to render the 
summary, then exit the app and process + setTimeout(() => { + exit(); + // extra safety: force process exit shortly after unmount + setTimeout(() => process.exit(0), 100); + }, 600); + }; + }, [loadInitialConfig, exit, beginExit]); useInput(() => { if (status === "error") { @@ -49,18 +60,20 @@ export default function App() {
-            
+            {!showExitSummary && }
             {helpMenuOpen && <HelpMenu />}
-            
-            {status === "checkpoint-request" ? (
+            {!showExitSummary && }
+            {showExitSummary ? (
+                <ExitSummary />
+            ) : status === "checkpoint-request" ? (
                 <CheckpointConfirmation />
             ) : status === "tool-request" ? (
                 <ToolConfirmation />
             ) : (
                 
             )}
-