diff --git a/src/content/docs/agents/api-reference/agents-api.mdx b/src/content/docs/agents/api-reference/agents-api.mdx index 4b2ac2b04ec19b..f8a9ddf93af951 100644 --- a/src/content/docs/agents/api-reference/agents-api.mdx +++ b/src/content/docs/agents/api-reference/agents-api.mdx @@ -804,14 +804,14 @@ function useAgent( The Agents SDK exposes an `AIChatAgent` class that extends the `Agent` class and exposes an `onChatMessage` method that simplifies building interactive chat agents. -You can combine this with the `useAgentChat` React hook from the `agents/ai-react` package to manage chat state and messages between a user and your Agent(s). +You can combine this with the `useAgentChat` React hook from the `@cloudflare/ai-chat/react` package to manage chat state and messages between a user and your Agent(s). #### AIChatAgent Extension of the `Agent` class with built-in chat capabilities. ```ts -import { AIChatAgent } from "agents/ai-chat-agent"; +import { AIChatAgent } from "@cloudflare/ai-chat"; import { Message, StreamTextOnFinishCallback, ToolSet } from "ai"; // Base class for chat-specific agents @@ -834,7 +834,7 @@ class AIChatAgent extends Agent { ```ts // Example of extending AIChatAgent -import { AIChatAgent } from "agents/ai-chat-agent"; +import { AIChatAgent } from "@cloudflare/ai-chat"; import { Message } from "ai"; interface Env { @@ -897,7 +897,7 @@ When you use `AIChatAgent` with `useAgentChat`: ```ts -import { AIChatAgent } from "agents/ai-chat-agent"; +import { AIChatAgent } from "@cloudflare/ai-chat"; import { streamText } from "ai"; import { openai } from "@ai-sdk/openai"; @@ -938,7 +938,7 @@ const { messages } = useAgentChat({ React hook for building AI chat interfaces using an Agent. ```ts -import { useAgentChat } from "agents/ai-react"; +import { useAgentChat } from "@cloudflare/ai-chat/react"; import { useAgent } from "agents/react"; import type { Message } from "ai"; @@ -1012,7 +1012,7 @@ function useAgentChat(options: UseAgentChatOptions): { ```tsx // Example of using useAgentChat in a React component -import { useAgentChat } from "agents/ai-react"; +import { useAgentChat } from "@cloudflare/ai-chat/react"; import { useAgent } from "agents/react"; import { useState } from "react"; diff --git a/src/content/docs/agents/concepts/ai-chat.mdx b/src/content/docs/agents/concepts/ai-chat.mdx new file mode 100644 index 00000000000000..be3d60d225e679 --- /dev/null +++ b/src/content/docs/agents/concepts/ai-chat.mdx @@ -0,0 +1,453 @@ +--- +pcx_content_type: concept +title: AI Chat Package +sidebar: + order: 8 +description: Learn about the @cloudflare/ai-chat package for building AI-powered chat experiences with persistent conversations, streaming responses, and tool support. +--- + +import { PackageManagers, TypeScriptExample } from "~/components"; + +The `@cloudflare/ai-chat` package provides a specialized framework for building intelligent chat experiences with persistent, stateful AI agents. Built on top of the Agents SDK and integrated with the AI SDK, this package handles the complexities of chat state management, message persistence, streaming, and tool execution. + +## Overview + +AI chat transcends simple request-response patterns. 
The `@cloudflare/ai-chat` package enables:

- **Automatic Persistence**: Conversations are automatically saved and restored across sessions
- **Streaming Responses**: Real-time responses that flow naturally to users
- **Resumable Streaming**: Automatic recovery from disconnections without losing context
- **Tool Support**: Seamless integration of client-side and server-side capabilities
- **State Management**: Each conversation maintains its own memory and understanding

Built on Cloudflare's global network with Durable Objects, your chat agents persist over time, maintaining context and relationships with users.

## Installation

Install the package alongside the agents framework and AI SDK:

<PackageManagers pkg="@cloudflare/ai-chat agents ai" />

## Core Components

### AIChatAgent

The `AIChatAgent` class extends the base `Agent` class with chat-specific capabilities:

```ts
import { AIChatAgent } from "@cloudflare/ai-chat";
import { openai } from "@ai-sdk/openai";
import {
  streamText,
  convertToModelMessages,
  createUIMessageStream,
  createUIMessageStreamResponse
} from "ai";

export class ChatAgent extends AIChatAgent {
  async onChatMessage() {
    const stream = createUIMessageStream({
      execute: async ({ writer }) => {
        const result = streamText({
          model: openai("gpt-4o"),
          messages: await convertToModelMessages(this.messages)
        });

        writer.merge(result.toUIMessageStream());
      }
    });

    return createUIMessageStreamResponse({ stream });
  }
}
```

The agent automatically:
- Persists all messages to SQLite
- Handles message format migrations
- Manages streaming state
- Coordinates tool execution

### useAgentChat Hook

The `useAgentChat` React hook provides a simple interface for chat interactions:

```tsx
import { useAgent } from "agents/react";
import { useAgentChat } from "@cloudflare/ai-chat/react";
import { useState } from "react";

function ChatInterface() {
  const agent = useAgent({
    agent: "ChatAgent",
    name: "my-chat"
  });

  const { messages, sendMessage, clearHistory, status } = useAgentChat({
    agent
  });

  const [input, setInput] = useState("");

  const handleSubmit = async (e: React.FormEvent) => {
    e.preventDefault();
    if (!input.trim()) return;

    await sendMessage({
      role: "user",
      parts: [{ type: "text", text: input }]
    });
    setInput("");
  };

  return (
    <div>
      <div>
        {messages.map((message) => (
          <div key={message.id}>
            {message.role}: {message.parts.map((part) =>
              part.type === "text" ? part.text : null
            )}
          </div>
        ))}
      </div>

      <form onSubmit={handleSubmit}>
        <input
          value={input}
          onChange={(e) => setInput(e.target.value)}
          placeholder="Type your message..."
        />
        <button type="submit">Send</button>
      </form>
    </div>
  );
}
```
+ +## Key Features + +### Resumable Streaming + +One of the most powerful features is automatic resumable streaming. When a client disconnects during a stream, the response automatically resumes when they reconnect. + +#### How it works + +1. **During streaming**: All chunks are automatically persisted to SQLite +2. **On disconnect**: The stream continues server-side, buffering chunks +3. **On reconnect**: Client receives all buffered chunks and continues streaming + +No special configuration needed - it just works. + + + +```tsx +const { messages, status } = useAgentChat({ + agent + // resume: true is the default +}); + +// Try it: Start a long response, refresh the page, and watch it resume! +``` + + + +To disable resumable streaming: + + + +```tsx +const { messages } = useAgentChat({ + agent, + resume: false +}); +``` + + + +### Tool Support + +The package supports both server-side and client-side tools for rich interactive experiences. + +#### Server-Side Tools + +Define tools that execute on the server: + + + +```ts +import { AIChatAgent } from "@cloudflare/ai-chat"; +import { streamText, convertToModelMessages, tool } from "ai"; +import { z } from "zod"; +import { createUIMessageStream, createUIMessageStreamResponse } from "ai"; + +export class ToolChat extends AIChatAgent { + async onChatMessage() { + const stream = createUIMessageStream({ + execute: async ({ writer }) => { + const result = streamText({ + model: openai("gpt-4o"), + messages: await convertToModelMessages(this.messages), + tools: { + getWeather: tool({ + description: "Get weather for a city", + parameters: z.object({ city: z.string() }), + execute: async ({ city }) => { + const weather = await fetch(`https://api.weather.com/${city}`); + return { temperature: 72, condition: "sunny" }; + } + }) + } + }); + + writer.merge(result.toUIMessageStream()); + } + }); + + return createUIMessageStreamResponse({ stream }); + } +} +``` + + + +#### Client-Side Tools + +For tools that need to run in the browser: + + + +```tsx +import { useAgentChat } from "@cloudflare/ai-chat/react"; + +function ChatWithClientTools() { + const { messages, sendMessage, addToolResult } = useAgentChat({ + agent, + onToolCall: async ({ toolCall, addToolOutput }) => { + if (toolCall.toolName === "showAlert") { + alert(toolCall.input.message); + addToolOutput({ + toolCallId: toolCall.toolCallId, + output: { success: true } + }); + } + } + }); +} +``` + + + +### Message Metadata + +Attach custom metadata to messages for tracking and analytics: + + + +```ts +import { AIChatAgent } from "@cloudflare/ai-chat"; +import { streamText, convertToModelMessages, createUIMessageStream, createUIMessageStreamResponse } from "ai"; + +export class MetadataChat extends AIChatAgent { + async onChatMessage() { + const startTime = Date.now(); + + const stream = createUIMessageStream({ + execute: async ({ writer }) => { + const result = streamText({ + model: openai("gpt-4o"), + messages: await convertToModelMessages(this.messages) + }); + + writer.merge( + result.toUIMessageStream({ + messageMetadata: ({ part }) => { + if (part.type === "start") { + return { + model: "gpt-4o", + createdAt: Date.now(), + messageCount: this.messages.length + }; + } + if (part.type === "finish") { + return { + responseTime: Date.now() - startTime, + totalTokens: part.totalUsage?.totalTokens + }; + } + } + }) + ); + } + }); + + return createUIMessageStreamResponse({ stream }); + } +} +``` + + + +Access metadata on the client: + + + +```tsx +{messages.map((message) => ( +
  <div key={message.id}>
    {message.metadata?.createdAt && (
      <span>{new Date(message.metadata.createdAt).toLocaleTimeString()}</span>
    )}
    {message.metadata?.totalTokens && (
      <span>{message.metadata.totalTokens} tokens</span>
    )}
  </div>
))}
```
+ +For more details, refer to the [AI SDK Message Metadata documentation](https://ai-sdk.dev/docs/ai-sdk-ui/message-metadata). + +## Advanced Patterns + +### Custom Request Preparation + +Add custom headers or context to requests: + + + +```tsx +const { messages, sendMessage } = useAgentChat({ + agent, + prepareSendMessagesRequest: ({ id, messages }) => ({ + body: { + currentUrl: window.location.href, + userTimezone: Intl.DateTimeFormat().resolvedOptions().timeZone + }, + headers: { + "X-Widget-Version": "1.0.0", + "X-Request-ID": crypto.randomUUID() + } + }) +}); +``` + + + +### Automatic Tool Continuation + +Enable automatic continuation after tool results: + + + +```tsx +const { messages, addToolResult } = useAgentChat({ + agent, + autoContinueAfterToolResult: true, + onToolCall: async ({ toolCall, addToolOutput }) => { + const result = await executeTool(toolCall); + addToolOutput({ + toolCallId: toolCall.toolCallId, + output: result, + autoContinue: true + }); + } +}); +``` + + + +### Human-in-the-Loop Tools + +For tools requiring user approval: + + + +```tsx +const { messages, addToolResult } = useAgentChat({ + agent, + toolsRequiringConfirmation: ["sendEmail", "deleteFile"], + onToolCall: async ({ toolCall, addToolOutput }) => { + if (toolCall.toolName === "sendEmail") { + const approved = confirm(`Send email to ${toolCall.input.recipient}?`); + if (approved) { + const result = await sendEmail(toolCall.input); + addToolOutput({ + toolCallId: toolCall.toolCallId, + output: result, + autoContinue: true + }); + } + } + } +}); +``` + + + +## Integration with Agents Framework + +The `@cloudflare/ai-chat` package is built on top of the [Agents SDK](/agents/). It extends the base `Agent` class with chat-specific capabilities: + +- Automatic message persistence in SQLite +- WebSocket-based real-time communication +- Resumable streaming infrastructure +- Tool execution coordination + +For more information about the underlying agent framework, refer to the [Agents documentation](/agents/). + +## API Reference + +### AIChatAgent + +Base class for chat agents that extends `Agent`. + +**Methods:** + +- `onChatMessage(onFinish, options?)`: Override this method to handle chat messages and return a `Response` +- `persistMessages(messages, excludeBroadcastIds?)`: Manually persist messages (usually automatic) +- `messages`: Array of current chat messages + +**Properties:** + +- `messages: ChatMessage[]`: The current conversation messages + +### useAgentChat + +React hook for chat interactions. 
+ +**Options:** + +- `agent`: Agent connection from `useAgent()` +- `onToolCall`: Callback for handling client-side tool execution +- `toolsRequiringConfirmation`: Array of tool names that need user approval +- `autoContinueAfterToolResult`: Automatically continue conversation after tool results +- `resume`: Enable automatic stream resumption (default: `true`) +- `prepareSendMessagesRequest`: Customize request headers/body + +**Returns:** + +- `messages`: Array of chat messages +- `sendMessage`: Function to send a new message +- `clearHistory`: Function to clear conversation history +- `addToolResult`: Function to provide tool output +- `status`: Current status ("idle" | "streaming" | "error") + +## Examples + +- [Basic Chat](/agents/getting-started/build-a-chat-agent/): Simple streaming chat +- [Resumable Streaming](https://github.com/cloudflare/agents/tree/main/examples/resumable-stream-chat): Automatic stream resumption +- [Human-in-the-Loop](/agents/guides/human-in-the-loop/): Tools requiring user approval + +## Next steps + +- Learn more about [building a chat agent](/agents/getting-started/build-a-chat-agent/) +- Explore [human-in-the-loop patterns](/agents/guides/human-in-the-loop/) +- Understand the [underlying Agent class](/agents/concepts/agent-class/) diff --git a/src/content/docs/agents/concepts/codemode.mdx b/src/content/docs/agents/concepts/codemode.mdx new file mode 100644 index 00000000000000..9e6354735f9eae --- /dev/null +++ b/src/content/docs/agents/concepts/codemode.mdx @@ -0,0 +1,438 @@ +--- +pcx_content_type: concept +title: Code Mode +sidebar: + order: 9 +description: Learn about Code Mode, which enables LLMs to write executable code that orchestrates tools, enabling complex workflows and multi-step operations. +--- + +import { PackageManagers, TypeScriptExample, WranglerConfig } from "~/components"; + +Code Mode is an experimental pattern that lets LLMs write executable code to orchestrate tools, instead of calling tools directly. **LLMs are better at writing code than calling tools** - they have seen millions of lines of real-world TypeScript but only contrived tool-calling examples. + +Code Mode converts your tools (especially MCP servers) into TypeScript APIs, enabling complex workflows, error handling, and multi-step operations that are natural in code but difficult with traditional tool calling. + +:::note[Experimental Feature] + +Code Mode is currently experimental and may have breaking changes in future releases. Use with caution in production environments. + +::: + +## Why Code Mode? + +Traditional tool calling asks LLMs to generate structured JSON that matches predefined schemas. Code Mode leverages what LLMs do best: writing code that orchestrates multiple operations. + +### Benefits + +- **Natural for LLMs**: Writing code is more natural than generating tool call JSON +- **Complex Workflows**: Chain multiple operations with conditional logic and error handling +- **MCP Server Orchestration**: Seamlessly compose operations across multiple MCP servers +- **Self-Debugging**: LLMs can revise their approach when operations fail +- **Dynamic Composition**: Combine tools in ways not anticipated by developers + +### Code Mode with MCP Servers + +Code Mode is particularly powerful when working with MCP (Model Context Protocol) servers. 
MCP servers provide rich, stateful interfaces to external systems, but traditional tool calling can be limiting when you need to: + +- Chain multiple MCP operations in complex workflows +- Handle stateful interactions requiring multiple round-trips +- Implement error handling and retry logic across MCP calls +- Compose different MCP servers in novel ways +- Perform conditional logic based on MCP server responses + +## Installation + +Install the package alongside the agents framework: + + + +## Configuration + +Define the required bindings in your `wrangler.toml` or `wrangler.jsonc`: + + + +```jsonc +{ + "compatibility_flags": ["experimental", "enable_ctx_exports"], + "worker_loaders": [ + { + "binding": "LOADER" + } + ], + "services": [ + { + "binding": "globalOutbound", + "service": "your-service", + "entrypoint": "globalOutbound" + }, + { + "binding": "CodeModeProxy", + "service": "your-service", + "entrypoint": "CodeModeProxy" + } + ] +} +``` + + + +## How It Works + +1. **Tool Detection**: When the LLM needs to use tools, it generates a `codemode` tool call +2. **Code Generation**: The system generates executable JavaScript code that uses your tools +3. **Safe Execution**: Code runs in an isolated worker environment with controlled access +4. **Result Return**: The executed code's result is returned to the conversation + +## Usage + +### Before (Traditional Tool Calling) + + + +```ts +import { streamText, tool } from "ai"; +import { z } from "zod"; + +const result = streamText({ + model: openai("gpt-4o"), + messages, + tools: { + getWeather: tool({ + description: "Get weather for a location", + inputSchema: z.object({ location: z.string() }), + execute: async ({ location }) => { + return `Weather in ${location}: 72°F, sunny`; + } + }), + sendEmail: tool({ + description: "Send an email", + inputSchema: z.object({ + to: z.string(), + subject: z.string(), + body: z.string() + }), + execute: async ({ to, subject, body }) => { + return `Email sent to ${to}`; + } + }) + } +}); +``` + + + +### After (With Code Mode) + + + +```ts +import { experimental_codemode as codemode } from "@cloudflare/codemode/ai"; +import { streamText, tool } from "ai"; +import { z } from "zod"; + +// Define your tools as usual +const tools = { + getWeather: tool({ + description: "Get weather for a location", + inputSchema: z.object({ location: z.string() }), + execute: async ({ location }) => { + return `Weather in ${location}: 72°F, sunny`; + } + }), + sendEmail: tool({ + description: "Send an email", + inputSchema: z.object({ + to: z.string(), + subject: z.string(), + body: z.string() + }), + execute: async ({ to, subject, body }) => { + return `Email sent to ${to}`; + } + }) +}; + +// Configure Code Mode +const { prompt, tools: wrappedTools } = await codemode({ + prompt: "You are a helpful assistant...", + tools, + globalOutbound: env.globalOutbound, + loader: env.LOADER, + proxy: this.ctx.exports.CodeModeProxy({ + props: { + binding: "MyAgent", + name: this.name, + callback: "callTool" + } + }) +}); + +// Use the wrapped tools - LLM will generate code instead! 
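// wrappedTools exposes a single "codemode" tool: rather than emitting one
// JSON tool call per step, the model writes code that invokes your original
// tools through the CodeModeProxy.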
+const result = streamText({ + model: openai("gpt-4o"), + system: prompt, + messages, + tools: wrappedTools // Single "codemode" tool +}); +``` + + + +## Agent Integration + +### Basic Agent with Code Mode + + + +```ts +import { Agent } from "agents"; +import { experimental_codemode as codemode } from "@cloudflare/codemode/ai"; +import { streamText, convertToModelMessages } from "ai"; +import { openai } from "@ai-sdk/openai"; + +export class CodeModeAgent extends Agent { + async onChatMessage() { + const { prompt, tools: wrappedTools } = await codemode({ + prompt: "You are a helpful assistant...", + tools: this.tools, + globalOutbound: env.globalOutbound, + loader: env.LOADER, + proxy: this.ctx.exports.CodeModeProxy({ + props: { + binding: "CodeModeAgent", + name: this.name, + callback: "callTool" + } + }) + }); + + const result = streamText({ + model: openai("gpt-4o"), + system: prompt, + messages: await convertToModelMessages(this.messages), + tools: wrappedTools + }); + + return result.toUIMessageStreamResponse(); + } + + callTool(functionName: string, args: unknown[]) { + return this.tools[functionName]?.execute?.(args, { + abortSignal: new AbortController().signal, + toolCallId: "codemode", + messages: [] + }); + } +} + +export { CodeModeProxy } from "@cloudflare/codemode/ai"; +``` + + + +### With MCP Servers + + + +```ts +import { Agent } from "agents"; +import { experimental_codemode as codemode } from "@cloudflare/codemode/ai"; +import { streamText, convertToModelMessages } from "ai"; + +export class CodeModeAgent extends Agent { + async onChatMessage() { + const allTools = { + ...regularTools, + ...this.mcp.getAITools() // Include MCP tools + }; + + const { prompt, tools: wrappedTools } = await codemode({ + prompt: "You are a helpful assistant...", + tools: allTools, + globalOutbound: env.globalOutbound, + loader: env.LOADER, + proxy: this.ctx.exports.CodeModeProxy({ + props: { + binding: "CodeModeAgent", + name: this.name, + callback: "callTool" + } + }) + }); + + const result = streamText({ + model: openai("gpt-4o"), + system: prompt, + messages: await convertToModelMessages(this.messages), + tools: wrappedTools + }); + + return result.toUIMessageStreamResponse(); + } + + callTool(functionName: string, args: unknown[]) { + return this.tools[functionName]?.execute?.(args, { + abortSignal: new AbortController().signal, + toolCallId: "codemode", + messages: [] + }); + } +} + +export { CodeModeProxy } from "@cloudflare/codemode/ai"; +``` + + + +## Generated Code Example + +Code Mode enables complex workflows that chain multiple operations: + +```javascript +// Example generated code orchestrating multiple MCP servers: +async function executeTask() { + const files = await codemode.listFiles({ path: "/projects" }); + const recentProject = files + .filter((f) => f.type === "directory") + .sort((a, b) => new Date(b.modified) - new Date(a.modified))[0]; + + const projectStatus = await codemode.queryDatabase({ + query: "SELECT * FROM projects WHERE name = ?", + params: [recentProject.name] + }); + + if (projectStatus.length === 0 || projectStatus[0].status === "incomplete") { + await codemode.createTask({ + title: `Review project: ${recentProject.name}`, + priority: "high" + }); + await codemode.sendEmail({ + to: "team@company.com", + subject: "Project Review Needed" + }); + } + + return { success: true, project: recentProject }; +} +``` + +## Security + +Code runs in isolated Workers with millisecond startup times: + +- No network access by default - only through explicit bindings +- API 
keys are hidden in bindings, preventing leaks +- Custom security policies can be enforced + +### Global Outbound Control + + + +```ts +export const globalOutbound = { + fetch: async (input: string | URL | RequestInfo, init?: RequestInit) => { + const url = new URL(typeof input === "string" ? input : input.toString()); + + // Block certain domains + if (url.hostname === "example.com") { + return new Response("Not allowed", { status: 403 }); + } + + // Add rate limiting, logging, etc. + return fetch(input, init); + } +}; +``` + + + +## Environment Setup + +### Required Bindings + +- **LOADER**: Worker Loader for code execution +- **globalOutbound**: Service for network access control +- **CodeModeProxy**: Service for tool execution proxy + +### Environment Configuration + + + +```ts +export const globalOutbound = { + fetch: async (input: string | URL | RequestInfo, init?: RequestInit) => { + // Your security policies + return fetch(input, init); + } +}; + +export { CodeModeProxy } from "@cloudflare/codemode/ai"; +``` + + + +### Proxy Configuration + + + +```ts +proxy: this.ctx.exports.CodeModeProxy({ + props: { + binding: "YourAgentClass", + name: this.name, + callback: "callTool" + } +}); +``` + + + +## API Reference + +### experimental_codemode + +Wraps your tools with Code Mode, converting them into a single code-generating tool. + +**Options:** + +- `tools: ToolSet` - Your tool definitions (including MCP tools) +- `prompt: string` - System prompt for the LLM +- `globalOutbound: Fetcher` - Service binding for network access control +- `loader: WorkerLoader` - Worker Loader binding for code execution +- `proxy: Fetcher` - Proxy binding for tool execution + +**Returns:** + +- `prompt: string` - Enhanced system prompt +- `tools: ToolSet` - Wrapped tools (single "codemode" tool) + +### CodeModeProxy + +Worker entrypoint that routes tool calls back to your agent. + +**Props:** + +- `binding: string` - Your agent class name +- `name: string` - Agent instance name +- `callback: string` - Method name to call for tool execution + +## Limitations + +- **Experimental**: Subject to breaking changes +- **Requires Cloudflare Workers**: Uses Worker Loader API (beta) +- **JavaScript Only**: Python support planned +- **MCP state updates**: Need refinement + +## Examples + +Explore these examples to see Code Mode in action: + +- [Complete Demo](https://github.com/cloudflare/agents/tree/main/examples/codemode): Full working example with MCP integration +- [Blog Post](https://blog.cloudflare.com/code-mode/): Deep dive into philosophy and implementation + +## Next steps + +- Learn about [MCP server integration](/agents/model-context-protocol/) +- Understand the [Agents framework](/agents/) +- Explore [tool patterns](/agents/concepts/tools/) diff --git a/src/content/docs/agents/guides/human-in-the-loop.mdx b/src/content/docs/agents/guides/human-in-the-loop.mdx index 70c2ea28998001..842b35aba473f3 100644 --- a/src/content/docs/agents/guides/human-in-the-loop.mdx +++ b/src/content/docs/agents/guides/human-in-the-loop.mdx @@ -50,7 +50,7 @@ cd human-in-the-loop 3. Install the required dependencies: ```sh -npm install agents @ai-sdk/openai ai zod react react-dom +npm install agents @cloudflare/ai-chat @ai-sdk/openai ai zod react react-dom ``` ## 2. Set up your environment variables @@ -96,7 +96,7 @@ Create your tool definitions at `src/tools.ts`. 
Tools can be configured to eithe ```ts import { tool } from "ai"; import { z } from "zod"; -import type { AITool } from "agents/ai-react"; +import type { AITool } from "@cloudflare/ai-chat/react"; // Server-side tool that requires confirmation (no execute function) const getWeatherInformationTool = tool({ @@ -224,7 +224,7 @@ Create your agent implementation at `src/server.ts`: ```ts import { openai } from "@ai-sdk/openai"; import { routeAgentRequest } from "agents"; -import { AIChatAgent } from "agents/ai-chat-agent"; +import { AIChatAgent } from "@cloudflare/ai-chat"; import { convertToModelMessages, createUIMessageStream, @@ -311,7 +311,7 @@ import type { UIMessage as Message } from "ai"; import { getToolName, isToolUIPart } from "ai"; import { clientTools } from "./tools"; import { APPROVAL, toolsRequiringConfirmation } from "./utils"; -import { useAgentChat, type AITool } from "agents/ai-react"; +import { useAgentChat, type AITool } from "@cloudflare/ai-chat/react"; import { useAgent } from "agents/react"; import { useCallback, useEffect, useRef, useState } from "react"; @@ -612,6 +612,7 @@ if (part.state === "input-available") { Replace OpenAI with [Workers AI](/workers-ai/): ```ts +import { AIChatAgent } from "@cloudflare/ai-chat"; import { createWorkersAI } from "workers-ai-provider"; export class HumanInTheLoop extends AIChatAgent {