diff --git a/packages/agent/src/agent-loop.ts b/packages/agent/src/agent-loop.ts
index 753466aca..377ea6c57 100644
--- a/packages/agent/src/agent-loop.ts
+++ b/packages/agent/src/agent-loop.ts
@@ -230,9 +230,13 @@ async function streamAssistantResponse(
   const resolvedApiKey =
     (config.getApiKey ? await config.getApiKey(config.model.provider) : undefined) || config.apiKey;
 
+  // Create custom fetch if configured
+  const customFetch = config.createFetch?.(config.model);
+
   const response = await streamFunction(config.model, llmContext, {
     ...config,
     apiKey: resolvedApiKey,
+    fetch: customFetch,
     signal,
   });
diff --git a/packages/agent/src/agent.ts b/packages/agent/src/agent.ts
index 2db01d81d..5008108e3 100644
--- a/packages/agent/src/agent.ts
+++ b/packages/agent/src/agent.ts
@@ -71,6 +71,12 @@ export interface AgentOptions {
    * Useful for expiring tokens (e.g., GitHub Copilot OAuth).
    */
   getApiKey?: (provider: string) => Promise<string | undefined> | string | undefined;
+
+  /**
+   * Factory to create a custom fetch function for HTTP requests.
+   * Use this to intercept, modify, or log HTTP requests made to the LLM provider.
+   */
+  createFetch?: (model: Model) => typeof globalThis.fetch;
 }
 
 export class Agent {
@@ -97,6 +103,7 @@ export class Agent {
   public streamFn: StreamFn;
   private _sessionId?: string;
   public getApiKey?: (provider: string) => Promise<string | undefined> | string | undefined;
+  public createFetch?: (model: Model) => typeof globalThis.fetch;
   private runningPrompt?: Promise<void>;
   private resolveRunningPrompt?: () => void;
 
@@ -109,6 +116,7 @@ export class Agent {
     this.streamFn = opts.streamFn || streamSimple;
     this._sessionId = opts.sessionId;
     this.getApiKey = opts.getApiKey;
+    this.createFetch = opts.createFetch;
   }
 
   /**
@@ -313,6 +321,7 @@ export class Agent {
       convertToLlm: this.convertToLlm,
       transformContext: this.transformContext,
       getApiKey: this.getApiKey,
+      createFetch: this.createFetch,
       getSteeringMessages: async () => {
         if (this.steeringMode === "one-at-a-time") {
           if (this.steeringQueue.length > 0) {
diff --git a/packages/agent/src/types.ts b/packages/agent/src/types.ts
index ecdb2057c..e8027b7ce 100644
--- a/packages/agent/src/types.ts
+++ b/packages/agent/src/types.ts
@@ -22,6 +22,12 @@ export type StreamFn = (
 export interface AgentLoopConfig extends SimpleStreamOptions {
   model: Model;
 
+  /**
+   * Factory to create a custom fetch function for HTTP requests.
+   * Use this to intercept, modify, or log HTTP requests made to the LLM provider.
+   */
+  createFetch?: (model: Model) => typeof globalThis.fetch;
+
   /**
    * Converts AgentMessage[] to LLM-compatible Message[] before each LLM call.
    *
diff --git a/packages/ai/src/providers/anthropic.ts b/packages/ai/src/providers/anthropic.ts
index 4be8b3c1a..e723d089a 100644
--- a/packages/ai/src/providers/anthropic.ts
+++ b/packages/ai/src/providers/anthropic.ts
@@ -115,7 +115,12 @@ export const streamAnthropic: StreamFunction<"anthropic-messages"> = (
   try {
     const apiKey = options?.apiKey ?? getEnvApiKey(model.provider) ?? "";
-    const { client, isOAuthToken } = createClient(model, apiKey, options?.interleavedThinking ?? true);
+    const { client, isOAuthToken } = createClient(
+      model,
+      apiKey,
+      options?.interleavedThinking ?? true,
+      options?.fetch,
+    );
     const params = buildParams(model, context, isOAuthToken, options);
     const anthropicStream = client.messages.stream({ ...params, stream: true }, { signal: options?.signal });
     stream.push({ type: "start", partial: output });
@@ -282,6 +287,7 @@ function createClient(
   model: Model<"anthropic-messages">,
   apiKey: string,
   interleavedThinking: boolean,
+  customFetch?: typeof globalThis.fetch,
 ): { client: Anthropic; isOAuthToken: boolean } {
   const betaFeatures = ["fine-grained-tool-streaming-2025-05-14"];
   if (interleavedThinking) {
@@ -302,6 +308,7 @@ function createClient(
       baseURL: model.baseUrl,
       defaultHeaders,
       dangerouslyAllowBrowser: true,
+      fetch: customFetch,
     });
 
     return { client, isOAuthToken: true };
@@ -318,6 +325,7 @@ function createClient(
     baseURL: model.baseUrl,
     dangerouslyAllowBrowser: true,
     defaultHeaders,
+    fetch: customFetch,
   });
 
   return { client, isOAuthToken: false };
diff --git a/packages/ai/src/providers/google-gemini-cli.ts b/packages/ai/src/providers/google-gemini-cli.ts
index 7bc8ef9a3..d917dfe0e 100644
--- a/packages/ai/src/providers/google-gemini-cli.ts
+++ b/packages/ai/src/providers/google-gemini-cli.ts
@@ -259,6 +259,7 @@ export const streamGoogleGeminiCli: StreamFunction<"google-gemini-cli"> = (
   const headers = isAntigravity ? ANTIGRAVITY_HEADERS : GEMINI_CLI_HEADERS;
 
   // Fetch with retry logic for rate limits and transient errors
+  const fetchFn = options?.fetch ?? globalThis.fetch;
   let response: Response | undefined;
   let lastError: Error | undefined;
 
@@ -268,7 +269,7 @@ export const streamGoogleGeminiCli: StreamFunction<"google-gemini-cli"> = (
     }
 
     try {
-      response = await fetch(url, {
+      response = await fetchFn(url, {
         method: "POST",
         headers: {
           Authorization: `Bearer ${accessToken}`,
diff --git a/packages/ai/src/providers/google.ts b/packages/ai/src/providers/google.ts
index 67893eefa..86df7d486 100644
--- a/packages/ai/src/providers/google.ts
+++ b/packages/ai/src/providers/google.ts
@@ -1,3 +1,11 @@
+/**
+ * Google Generative AI provider using the official @google/genai SDK.
+ *
+ * Note: The @google/genai SDK does not expose a custom fetch option in GoogleGenAIOptions.
+ * The SDK has internal support for custom fetch but it is not accessible via the public
+ * GoogleGenAI constructor. As a result, HTTP hooks will not fire for this provider.
+ * Users who need HTTP hooks should use the google-gemini-cli provider instead.
+ */
 import {
   type GenerateContentConfig,
   type GenerateContentParameters,
diff --git a/packages/ai/src/providers/openai-codex-responses.ts b/packages/ai/src/providers/openai-codex-responses.ts
index 0f74b635c..af46d8def 100644
--- a/packages/ai/src/providers/openai-codex-responses.ts
+++ b/packages/ai/src/providers/openai-codex-responses.ts
@@ -141,7 +141,8 @@ export const streamOpenAICodexResponses: StreamFunction<"openai-codex-responses"
     headers: redactHeaders(headers),
   });
 
-  const response = await fetch(url, {
+  const fetchFn = options?.fetch ?? globalThis.fetch;
+  const response = await fetchFn(url, {
     method: "POST",
     headers,
     body: JSON.stringify(transformedBody),
diff --git a/packages/ai/src/providers/openai-completions.ts b/packages/ai/src/providers/openai-completions.ts
index 30ee6ca27..3df8fa451 100644
--- a/packages/ai/src/providers/openai-completions.ts
+++ b/packages/ai/src/providers/openai-completions.ts
@@ -100,7 +100,7 @@ export const streamOpenAICompletions: StreamFunction<"openai-completions"> = (
   try {
     const apiKey = options?.apiKey || getEnvApiKey(model.provider) || "";
-    const client = createClient(model, context, apiKey);
+    const client = createClient(model, context, apiKey, options?.fetch);
     const params = buildParams(model, context, options);
     const openaiStream = await client.chat.completions.create(params, { signal: options?.signal });
     stream.push({ type: "start", partial: output });
@@ -315,7 +315,12 @@ export const streamOpenAICompletions: StreamFunction<"openai-completions"> = (
   return stream;
 };
 
-function createClient(model: Model<"openai-completions">, context: Context, apiKey?: string) {
+function createClient(
+  model: Model<"openai-completions">,
+  context: Context,
+  apiKey?: string,
+  customFetch?: typeof globalThis.fetch,
+) {
   if (!apiKey) {
     if (!process.env.OPENAI_API_KEY) {
       throw new Error(
@@ -356,6 +361,7 @@ function createClient(model: Model<"openai-completions">, context: Context, apiK
     baseURL: model.baseUrl,
     dangerouslyAllowBrowser: true,
     defaultHeaders: headers,
+    fetch: customFetch,
   });
 }
 
diff --git a/packages/ai/src/providers/openai-responses.ts b/packages/ai/src/providers/openai-responses.ts
index 884687ae8..f8b9d2fee 100644
--- a/packages/ai/src/providers/openai-responses.ts
+++ b/packages/ai/src/providers/openai-responses.ts
@@ -83,7 +83,7 @@ export const streamOpenAIResponses: StreamFunction<"openai-responses"> = (
   try {
     // Create OpenAI client
     const apiKey = options?.apiKey || getEnvApiKey(model.provider) || "";
-    const client = createClient(model, context, apiKey);
+    const client = createClient(model, context, apiKey, options?.fetch);
     const params = buildParams(model, context, options);
     const openaiStream = await client.responses.create(params, { signal: options?.signal });
     stream.push({ type: "start", partial: output });
@@ -312,7 +312,12 @@ export const streamOpenAIResponses: StreamFunction<"openai-responses"> = (
   return stream;
 };
 
-function createClient(model: Model<"openai-responses">, context: Context, apiKey?: string) {
+function createClient(
+  model: Model<"openai-responses">,
+  context: Context,
+  apiKey?: string,
+  customFetch?: typeof globalThis.fetch,
+) {
   if (!apiKey) {
     if (!process.env.OPENAI_API_KEY) {
       throw new Error(
@@ -353,6 +358,7 @@ function createClient(model: Model<"openai-responses">, context: Context, apiKey
     baseURL: model.baseUrl,
     dangerouslyAllowBrowser: true,
     defaultHeaders: headers,
+    fetch: customFetch,
   });
 }
 
diff --git a/packages/ai/src/stream.ts b/packages/ai/src/stream.ts
index 79bee9846..7661e095b 100644
--- a/packages/ai/src/stream.ts
+++ b/packages/ai/src/stream.ts
@@ -178,6 +178,7 @@ function mapOptionsForApi(
     signal: options?.signal,
     apiKey: apiKey || options?.apiKey,
     sessionId: options?.sessionId,
+    fetch: options?.fetch,
   };
 
   // Helper to clamp xhigh to high for providers that don't support it
diff --git a/packages/ai/src/types.ts b/packages/ai/src/types.ts
index 5c81bce9c..1c3fd3270 100644
--- a/packages/ai/src/types.ts
+++ b/packages/ai/src/types.ts
@@ -70,6 +70,12 @@ export interface StreamOptions {
    * session-aware features. Ignored by providers that don't support it.
    */
   sessionId?: string;
+  /**
+   * Custom fetch function for HTTP hooks.
+   * Note: The google-generative-ai provider does not support custom fetch due to SDK limitations.
+   * Use google-gemini-cli instead if you need HTTP hooks for Google models.
+   */
+  fetch?: typeof globalThis.fetch;
 }
 
 // Unified options with reasoning passed to streamSimple() and completeSimple()
diff --git a/packages/coding-agent/CHANGELOG.md b/packages/coding-agent/CHANGELOG.md
index f023b8654..c7f33e6e1 100644
--- a/packages/coding-agent/CHANGELOG.md
+++ b/packages/coding-agent/CHANGELOG.md
@@ -648,6 +648,13 @@ Total color count increased from 46 to 50. See [docs/theme.md](docs/theme.md) fo
 - **API key priority**: `ANTHROPIC_OAUTH_TOKEN` now takes precedence over `ANTHROPIC_API_KEY`
 - HTML export template split into separate files (template.html, template.css, template.js) for easier maintenance
 
+### Added
+
+- **`http_request` hook event**: Fired before HTTP requests to LLM providers. Hooks can add custom headers or cancel requests. API keys are automatically redacted from headers.
+- **`http_response` hook event**: Fired after HTTP responses from LLM providers. Includes status, headers, and request duration for logging and monitoring.
+
+**Note:** The `google-generative-ai` provider does not support HTTP hooks due to SDK limitations. Use `google-gemini-cli` instead for Google models if you need HTTP hooks.
+
 ### Fixed
 
 - HTML export now properly sanitizes user messages containing HTML tags like `
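
Usage sketch (not part of the diff): a minimal example of wiring up `createFetch` to log LLM traffic, roughly what the `http_request`/`http_response` hook events surface. The `Agent`, `Model`, and `createFetch` shapes come from the diff above; the import path and the logging details are assumptions.

```typescript
import { Agent, type Model } from "@example/agent"; // hypothetical import path

const agent = new Agent({
  // ...other AgentOptions (streamFn, getApiKey, ...) as needed...
  createFetch: (model: Model): typeof globalThis.fetch => {
    // Called once per LLM call (agent-loop invokes config.createFetch?.(config.model)),
    // so the returned function may run several times if a provider retries.
    return async (input, init) => {
      const started = Date.now();

      // Redact credentials before logging, as the hook events do for API keys.
      const headers = new Headers(init?.headers);
      for (const name of ["authorization", "x-api-key"]) {
        if (headers.has(name)) headers.set(name, "<redacted>");
      }
      console.log(`[${model.provider}] -> ${init?.method ?? "GET"} ${String(input)}`, Object.fromEntries(headers));

      const response = await globalThis.fetch(input, init);
      console.log(`[${model.provider}] <- ${response.status} in ${Date.now() - started}ms`);
      return response;
    };
  },
});
```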
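
At the `packages/ai` layer, the same capability is the plain `fetch` field on `StreamOptions`: the OpenAI and Anthropic SDK clients receive it through their `fetch` constructor option, while the raw-fetch providers (`google-gemini-cli`, `openai-codex-responses`) fall back via `options?.fetch ?? globalThis.fetch`. A sketch of passing it directly to a provider stream function; the `(model, context, options)` call shape is taken from the call sites in the diff, and the import path, `model`/`context` values, and header name are illustrative:

```typescript
import { streamAnthropic, type Context, type Model } from "@example/ai"; // hypothetical import path

declare const model: Model<"anthropic-messages">; // assume these are built elsewhere
declare const context: Context;

// Attach a tracing header to every request this stream makes.
const tracedFetch: typeof globalThis.fetch = (input, init) => {
  const headers = new Headers(init?.headers);
  headers.set("x-trace-id", crypto.randomUUID()); // illustrative header
  return globalThis.fetch(input, { ...init, headers });
};

const stream = streamAnthropic(model, context, {
  apiKey: process.env.ANTHROPIC_API_KEY,
  fetch: tracedFetch, // forwarded into the Anthropic client's fetch option
});
```

Keep in mind the caveat documented in `google.ts` and the CHANGELOG: the `google-generative-ai` provider ignores this option, so its traffic cannot be observed this way; use `google-gemini-cli` for Google models instead.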