From a2da46c23f54eaf8f2574f3715c11c729697d690 Mon Sep 17 00:00:00 2001 From: Samuel Colvin Date: Tue, 16 Sep 2025 11:12:21 -0700 Subject: [PATCH 01/10] adding vercel AI chat --- .gitignore | 2 +- examples/pydantic_ai_examples/chat_app.py | 231 ++++----------- .../pydantic_ai_examples/sqlite_database.py | 81 ++++++ .../pydantic_ai/agent/abstract.py | 159 +++++++++- pydantic_ai_slim/pydantic_ai/run.py | 13 + .../vercel_ai_elements/__init__.py | 0 .../pydantic_ai/vercel_ai_elements/_utils.py | 18 ++ .../vercel_ai_elements/request_types.py | 273 ++++++++++++++++++ .../vercel_ai_elements/response_stream.py | 141 +++++++++ .../vercel_ai_elements/response_types.py | 216 ++++++++++++++ .../vercel_ai_elements/starlette.py | 69 +++++ 11 files changed, 1023 insertions(+), 180 deletions(-) create mode 100644 examples/pydantic_ai_examples/sqlite_database.py create mode 100644 pydantic_ai_slim/pydantic_ai/vercel_ai_elements/__init__.py create mode 100644 pydantic_ai_slim/pydantic_ai/vercel_ai_elements/_utils.py create mode 100644 pydantic_ai_slim/pydantic_ai/vercel_ai_elements/request_types.py create mode 100644 pydantic_ai_slim/pydantic_ai/vercel_ai_elements/response_stream.py create mode 100644 pydantic_ai_slim/pydantic_ai/vercel_ai_elements/response_types.py create mode 100644 pydantic_ai_slim/pydantic_ai/vercel_ai_elements/starlette.py diff --git a/.gitignore b/.gitignore index c0f10dc973..22657eee27 100644 --- a/.gitignore +++ b/.gitignore @@ -10,7 +10,7 @@ env*/ /TODO.md /postgres-data/ .DS_Store -examples/pydantic_ai_examples/.chat_app_messages.sqlite +.chat_app_messages.sqlite .cache/ .vscode/ /question_graph_history.json diff --git a/examples/pydantic_ai_examples/chat_app.py b/examples/pydantic_ai_examples/chat_app.py index f81211111b..95bf228571 100644 --- a/examples/pydantic_ai_examples/chat_app.py +++ b/examples/pydantic_ai_examples/chat_app.py @@ -7,215 +7,90 @@ from __future__ import annotations as _annotations -import asyncio -import json -import sqlite3 -from collections.abc import AsyncIterator, Callable -from concurrent.futures.thread import ThreadPoolExecutor from contextlib import asynccontextmanager from dataclasses import dataclass -from datetime import datetime, timezone -from functools import partial from pathlib import Path -from typing import Annotated, Any, Literal, TypeVar import fastapi import logfire -from fastapi import Depends, Request -from fastapi.responses import FileResponse, Response, StreamingResponse -from typing_extensions import LiteralString, ParamSpec, TypedDict - -from pydantic_ai import Agent, UnexpectedModelBehavior -from pydantic_ai.messages import ( - ModelMessage, - ModelMessagesTypeAdapter, - ModelRequest, - ModelResponse, - TextPart, - UserPromptPart, -) +from fastapi import Depends, Request, Response + +from pydantic_ai import Agent, RunContext +from pydantic_ai.vercel_ai_elements.starlette import StarletteChat + +from .sqlite_database import Database # 'if-token-present' means nothing will be sent (and the example will work) if you don't have logfire configured logfire.configure(send_to_logfire='if-token-present') logfire.instrument_pydantic_ai() -agent = Agent('openai:gpt-4o') THIS_DIR = Path(__file__).parent +sql_schema = """ +create table if not exists memory( + id integer primary key, + user_id integer not null, + value text not null, + unique(user_id, value) +);""" @asynccontextmanager async def lifespan(_app: fastapi.FastAPI): - async with Database.connect() as db: + async with Database.connect(sql_schema) as db: yield {'db': db} -app = 
fastapi.FastAPI(lifespan=lifespan) -logfire.instrument_fastapi(app) +@dataclass +class Deps: + conn: Database + user_id: int -@app.get('/') -async def index() -> FileResponse: - return FileResponse((THIS_DIR / 'chat_app.html'), media_type='text/html') +chat_agent = Agent( + 'openai:gpt-4.1', instructions='You are a helpful assistant.', deps_type=Deps +) -@app.get('/chat_app.ts') -async def main_ts() -> FileResponse: - """Get the raw typescript code, it's compiled in the browser, forgive me.""" - return FileResponse((THIS_DIR / 'chat_app.ts'), media_type='text/plain') +@chat_agent.tool +async def record_memory(ctx: RunContext[Deps], value: str) -> str: + """Use this tool to store information in memory.""" + await ctx.deps.conn.execute( + 'insert into memory(user_id, value) values(?, ?) on conflict do nothing', + ctx.deps.user_id, + value, + commit=True, + ) + return 'Value added to memory.' -async def get_db(request: Request) -> Database: - return request.state.db +@chat_agent.tool +async def retrieve_memories(ctx: RunContext[Deps], memory_contains: str) -> str: + """Get all memories about the user.""" + rows = await ctx.deps.conn.fetchall( + 'select value from memory where user_id = ? and value like ?', + ctx.deps.user_id, + f'%{memory_contains}%', + ) + return '\n'.join([row['value'] for row in rows]) -@app.get('/chat/') -async def get_chat(database: Database = Depends(get_db)) -> Response: - msgs = await database.get_messages() - return Response( - b'\n'.join(json.dumps(to_chat_message(m)).encode('utf-8') for m in msgs), - media_type='text/plain', - ) +starlette_chat = StarletteChat(chat_agent) +app = fastapi.FastAPI(lifespan=lifespan) +logfire.instrument_fastapi(app) -class ChatMessage(TypedDict): - """Format of messages sent to the browser.""" - - role: Literal['user', 'model'] - timestamp: str - content: str - - -def to_chat_message(m: ModelMessage) -> ChatMessage: - first_part = m.parts[0] - if isinstance(m, ModelRequest): - if isinstance(first_part, UserPromptPart): - assert isinstance(first_part.content, str) - return { - 'role': 'user', - 'timestamp': first_part.timestamp.isoformat(), - 'content': first_part.content, - } - elif isinstance(m, ModelResponse): - if isinstance(first_part, TextPart): - return { - 'role': 'model', - 'timestamp': m.timestamp.isoformat(), - 'content': first_part.content, - } - raise UnexpectedModelBehavior(f'Unexpected message type for chat app: {m}') - - -@app.post('/chat/') -async def post_chat( - prompt: Annotated[str, fastapi.Form()], database: Database = Depends(get_db) -) -> StreamingResponse: - async def stream_messages(): - """Streams new line delimited JSON `Message`s to the client.""" - # stream the user prompt so that can be displayed straight away - yield ( - json.dumps( - { - 'role': 'user', - 'timestamp': datetime.now(tz=timezone.utc).isoformat(), - 'content': prompt, - } - ).encode('utf-8') - + b'\n' - ) - # get the chat history so far to pass as context to the agent - messages = await database.get_messages() - # run the agent with the user prompt and the chat history - async with agent.run_stream(prompt, message_history=messages) as result: - async for text in result.stream_output(debounce_by=0.01): - # text here is a `str` and the frontend wants - # JSON encoded ModelResponse, so we create one - m = ModelResponse(parts=[TextPart(text)], timestamp=result.timestamp()) - yield json.dumps(to_chat_message(m)).encode('utf-8') + b'\n' - - # add new messages (e.g. 
the user prompt and the agent response in this case) to the database - await database.add_messages(result.new_messages_json()) - - return StreamingResponse(stream_messages(), media_type='text/plain') - - -P = ParamSpec('P') -R = TypeVar('R') +async def get_db(request: Request) -> Database: + return request.state.db -@dataclass -class Database: - """Rudimentary database to store chat messages in SQLite. - - The SQLite standard library package is synchronous, so we - use a thread pool executor to run queries asynchronously. - """ - - con: sqlite3.Connection - _loop: asyncio.AbstractEventLoop - _executor: ThreadPoolExecutor - - @classmethod - @asynccontextmanager - async def connect( - cls, file: Path = THIS_DIR / '.chat_app_messages.sqlite' - ) -> AsyncIterator[Database]: - with logfire.span('connect to DB'): - loop = asyncio.get_event_loop() - executor = ThreadPoolExecutor(max_workers=1) - con = await loop.run_in_executor(executor, cls._connect, file) - slf = cls(con, loop, executor) - try: - yield slf - finally: - await slf._asyncify(con.close) - - @staticmethod - def _connect(file: Path) -> sqlite3.Connection: - con = sqlite3.connect(str(file)) - con = logfire.instrument_sqlite3(con) - cur = con.cursor() - cur.execute( - 'CREATE TABLE IF NOT EXISTS messages (id INT PRIMARY KEY, message_list TEXT);' - ) - con.commit() - return con - - async def add_messages(self, messages: bytes): - await self._asyncify( - self._execute, - 'INSERT INTO messages (message_list) VALUES (?);', - messages, - commit=True, - ) - await self._asyncify(self.con.commit) - - async def get_messages(self) -> list[ModelMessage]: - c = await self._asyncify( - self._execute, 'SELECT message_list FROM messages order by id' - ) - rows = await self._asyncify(c.fetchall) - messages: list[ModelMessage] = [] - for row in rows: - messages.extend(ModelMessagesTypeAdapter.validate_json(row[0])) - return messages - - def _execute( - self, sql: LiteralString, *args: Any, commit: bool = False - ) -> sqlite3.Cursor: - cur = self.con.cursor() - cur.execute(sql, args) - if commit: - self.con.commit() - return cur - - async def _asyncify( - self, func: Callable[P, R], *args: P.args, **kwargs: P.kwargs - ) -> R: - return await self._loop.run_in_executor( # type: ignore - self._executor, - partial(func, **kwargs), - *args, # type: ignore - ) +@app.options('/api/chat') +def options_chat(): + pass + + +@app.post('/api/chat') +async def get_chat(request: Request, database: Database = Depends(get_db)) -> Response: + return await starlette_chat.dispatch_request(request, deps=Deps(database, 123)) if __name__ == '__main__': diff --git a/examples/pydantic_ai_examples/sqlite_database.py b/examples/pydantic_ai_examples/sqlite_database.py new file mode 100644 index 0000000000..491e5020dd --- /dev/null +++ b/examples/pydantic_ai_examples/sqlite_database.py @@ -0,0 +1,81 @@ +from __future__ import annotations as _annotations + +import asyncio +import sqlite3 +from collections.abc import AsyncIterator, Callable +from concurrent.futures.thread import ThreadPoolExecutor +from contextlib import asynccontextmanager +from dataclasses import dataclass +from functools import partial +from pathlib import Path +from typing import Any, LiteralString, ParamSpec, TypeVar + +import logfire + +P = ParamSpec('P') +R = TypeVar('R') + + +@dataclass +class Database: + """Rudimentary database to store chat messages in SQLite. + + The SQLite standard library package is synchronous, so we + use a thread pool executor to run queries asynchronously. 
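+
+    Example (an illustrative sketch; the schema and values are assumptions,
+    not part of the chat app):
+
+        async with Database.connect('create table if not exists kv(k, v)') as db:
+            await db.execute('insert into kv values(?, ?)', 'k1', 'v1', commit=True)
+            rows = await db.fetchall('select v from kv where k = ?', 'k1')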
+ """ + + con: sqlite3.Connection + _loop: asyncio.AbstractEventLoop + _executor: ThreadPoolExecutor + + @classmethod + @asynccontextmanager + async def connect( + cls, schema_sql: str, file: Path = Path('.chat_app_messages.sqlite') + ) -> AsyncIterator[Database]: + with logfire.span('connect to DB'): + loop = asyncio.get_event_loop() + executor = ThreadPoolExecutor(max_workers=1) + con = await loop.run_in_executor(executor, cls._connect, schema_sql, file) + slf = cls(con, loop, executor) + try: + yield slf + finally: + await slf._asyncify(con.close) + + @staticmethod + def _connect(schema_sql: str, file: Path) -> sqlite3.Connection: + con = sqlite3.connect(str(file)) + con = logfire.instrument_sqlite3(con) + cur = con.cursor() + cur.execute(schema_sql) + con.commit() + return con + + async def execute(self, sql: LiteralString, *args: Any, commit: bool = False): + await self._asyncify(self._execute, sql, *args, commit=True) + if commit: + await self._asyncify(self.con.commit) + + async def fetchall(self, sql: LiteralString, *args: Any) -> list[dict[str, Any]]: + c = await self._asyncify(self._execute, sql, *args) + rows = await self._asyncify(c.fetchall) + return [dict(row) for row in rows] + + def _execute( + self, sql: LiteralString, *args: Any, commit: bool = False + ) -> sqlite3.Cursor: + cur = self.con.cursor() + cur.execute(sql, args) + if commit: + self.con.commit() + return cur + + async def _asyncify( + self, func: Callable[P, R], *args: P.args, **kwargs: P.kwargs + ) -> R: + return await self._loop.run_in_executor( # type: ignore + self._executor, + partial(func, **kwargs), + *args, # type: ignore + ) diff --git a/pydantic_ai_slim/pydantic_ai/agent/abstract.py b/pydantic_ai_slim/pydantic_ai/agent/abstract.py index 8d6c9ff293..54c6856099 100644 --- a/pydantic_ai_slim/pydantic_ai/agent/abstract.py +++ b/pydantic_ai_slim/pydantic_ai/agent/abstract.py @@ -1,5 +1,6 @@ from __future__ import annotations as _annotations +import asyncio import inspect from abc import ABC, abstractmethod from collections.abc import AsyncIterable, AsyncIterator, Awaitable, Callable, Iterator, Mapping, Sequence @@ -7,6 +8,7 @@ from types import FrameType from typing import TYPE_CHECKING, Any, Generic, TypeAlias, cast, overload +import anyio from typing_extensions import Self, TypeIs, TypeVar from pydantic_graph import End @@ -24,7 +26,7 @@ from .._tool_manager import ToolManager from ..output import OutputDataT, OutputSpec from ..result import AgentStream, FinalResult, StreamedRunResult -from ..run import AgentRun, AgentRunResult +from ..run import AgentRun, AgentRunResult, AgentRunResultEvent from ..settings import ModelSettings from ..tools import ( AgentDepsT, @@ -543,6 +545,161 @@ async def on_complete() -> None: if not yielded: raise exceptions.AgentRunError('Agent run finished without producing a final result') # pragma: no cover + @overload + def run_stream_events( + self, + user_prompt: str | Sequence[_messages.UserContent] | None = None, + *, + output_type: None = None, + message_history: list[_messages.ModelMessage] | None = None, + deferred_tool_results: DeferredToolResults | None = None, + model: models.Model | models.KnownModelName | str | None = None, + deps: AgentDepsT = None, + model_settings: ModelSettings | None = None, + usage_limits: _usage.UsageLimits | None = None, + usage: _usage.RunUsage | None = None, + infer_name: bool = True, + toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None, + ) -> AsyncIterator[_messages.AgentStreamEvent | AgentRunResultEvent[OutputDataT]]: ... 
+ + @overload + def run_stream_events( + self, + user_prompt: str | Sequence[_messages.UserContent] | None = None, + *, + output_type: OutputSpec[RunOutputDataT], + message_history: list[_messages.ModelMessage] | None = None, + deferred_tool_results: DeferredToolResults | None = None, + model: models.Model | models.KnownModelName | str | None = None, + deps: AgentDepsT = None, + model_settings: ModelSettings | None = None, + usage_limits: _usage.UsageLimits | None = None, + usage: _usage.RunUsage | None = None, + infer_name: bool = True, + toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None, + ) -> AsyncIterator[_messages.AgentStreamEvent | AgentRunResultEvent[RunOutputDataT]]: ... + + def run_stream_events( + self, + user_prompt: str | Sequence[_messages.UserContent] | None = None, + *, + output_type: OutputSpec[RunOutputDataT] | None = None, + message_history: list[_messages.ModelMessage] | None = None, + deferred_tool_results: DeferredToolResults | None = None, + model: models.Model | models.KnownModelName | str | None = None, + deps: AgentDepsT = None, + model_settings: ModelSettings | None = None, + usage_limits: _usage.UsageLimits | None = None, + usage: _usage.RunUsage | None = None, + infer_name: bool = True, + toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None, + ) -> AsyncIterator[_messages.AgentStreamEvent | AgentRunResultEvent[Any]]: + """Run the agent with a user prompt in async mode and stream events from the run. + + This is a convenience method that wraps [`self.run`][pydantic_ai.agent.AbstractAgent.run] and + uses the `event_stream_handler` kwarg to get a stream of events from the run. + + Example: + ```python + from pydantic_ai import Agent + + agent = Agent('openai:gpt-4o') + + async def main(): + async for event in agent.run_stream_events('What is the capital of France?'): + print(event) + ``` + + Arguments are the same as for [`self.run`][pydantic_ai.agent.AbstractAgent.run], + except that `event_stream_handler` is now allowed. + + Args: + user_prompt: User input to start/continue the conversation. + output_type: Custom output type to use for this run, `output_type` may only be used if the agent has no + output validators since output validators would expect an argument that matches the agent's output type. + message_history: History of the conversation so far. + deferred_tool_results: Optional results for deferred tool calls in the message history. + model: Optional model to use for this run, required if `model` was not set when creating the agent. + deps: Optional dependencies to use for this run. + model_settings: Optional settings to use for this model's request. + usage_limits: Optional limits on model request count or token usage. + usage: Optional usage to start with, useful for resuming a conversation or agents used in tools. + infer_name: Whether to try to infer the agent name from the call frame if it's not set. + toolsets: Optional additional toolsets for this run. + + Returns: + An async iterable of stream events `AgentStreamEvent` and finally a `AgentRunResultEvent` with the final + run result. 
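+
+        The stream ends with an `AgentRunResultEvent` carrying the final result, so the
+        output can be captured while streaming. A minimal sketch (assuming
+        `AgentRunResultEvent` is importable from `pydantic_ai.run`, where this diff
+        defines it):
+
+        ```python
+        from pydantic_ai import Agent
+        from pydantic_ai.run import AgentRunResultEvent
+
+        agent = Agent('openai:gpt-4o')
+
+        async def main():
+            async for event in agent.run_stream_events('What is the capital of France?'):
+                if isinstance(event, AgentRunResultEvent):
+                    print(event.result.output)
+        ```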
+ """ + # unfortunately this hack of returning a generator rather than defining it right here is + # required to allow overloads of this method to work in python's typing system, or at least with pyright + # or at least I couldn't make it work without + return self._run_stream_events( + user_prompt, + output_type=output_type, + message_history=message_history, + deferred_tool_results=deferred_tool_results, + model=model, + deps=deps, + model_settings=model_settings, + usage_limits=usage_limits, + usage=usage, + infer_name=infer_name, + toolsets=toolsets, + ) + + async def _run_stream_events( + self, + user_prompt: str | Sequence[_messages.UserContent] | None = None, + *, + output_type: OutputSpec[RunOutputDataT] | None = None, + message_history: list[_messages.ModelMessage] | None = None, + deferred_tool_results: DeferredToolResults | None = None, + model: models.Model | models.KnownModelName | str | None = None, + deps: AgentDepsT = None, + model_settings: ModelSettings | None = None, + usage_limits: _usage.UsageLimits | None = None, + usage: _usage.RunUsage | None = None, + infer_name: bool = True, + toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None, + ) -> AsyncIterator[_messages.AgentStreamEvent | AgentRunResultEvent[Any]]: + send_stream, receive_stream = anyio.create_memory_object_stream[ + _messages.AgentStreamEvent | AgentRunResultEvent[Any] + ]() + + async def event_stream_handler( + _: RunContext[AgentDepsT], events: AsyncIterable[_messages.AgentStreamEvent] + ) -> None: + async for event in events: + await send_stream.send(event) + + async def run_agent() -> AgentRunResult[Any]: + try: + return await self.run( + user_prompt, + output_type=output_type, + message_history=message_history, + deferred_tool_results=deferred_tool_results, + model=model, + deps=deps, + model_settings=model_settings, + usage_limits=usage_limits, + usage=usage, + infer_name=infer_name, + toolsets=toolsets, + event_stream_handler=event_stream_handler, + ) + finally: + send_stream.close() + + task = asyncio.create_task(run_agent()) + + async for message in receive_stream: + yield message + + result = await task + yield AgentRunResultEvent(result) + @overload def iter( self, diff --git a/pydantic_ai_slim/pydantic_ai/run.py b/pydantic_ai_slim/pydantic_ai/run.py index 0cc9481043..39a3d9080a 100644 --- a/pydantic_ai_slim/pydantic_ai/run.py +++ b/pydantic_ai_slim/pydantic_ai/run.py @@ -353,3 +353,16 @@ def timestamp(self) -> datetime: model_response = self.all_messages()[-1] assert isinstance(model_response, _messages.ModelResponse) return model_response.timestamp + + +@dataclasses.dataclass +class AgentRunResultEvent(Generic[OutputDataT]): + """An event indicating the agent run ended and containing the final result of the agent run.""" + + result: AgentRunResult[OutputDataT] + """The result of the run.""" + + _: dataclasses.KW_ONLY + + event_kind: Literal['agent_run_result'] = 'agent_run_result' + """Event type identifier, used as a discriminator.""" diff --git a/pydantic_ai_slim/pydantic_ai/vercel_ai_elements/__init__.py b/pydantic_ai_slim/pydantic_ai/vercel_ai_elements/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/pydantic_ai_slim/pydantic_ai/vercel_ai_elements/_utils.py b/pydantic_ai_slim/pydantic_ai/vercel_ai_elements/_utils.py new file mode 100644 index 0000000000..6ef877a235 --- /dev/null +++ b/pydantic_ai_slim/pydantic_ai/vercel_ai_elements/_utils.py @@ -0,0 +1,18 @@ +from abc import ABC +from typing import Any + +from pydantic import BaseModel, ConfigDict +from 
pydantic.alias_generators import to_camel + +__all__ = 'ProviderMetadata', 'CamelBaseModel' + +# technically this is recursive union of JSON types +# for to simplify validation, we call it Any +JSONValue = Any + +# Provider metadata types +ProviderMetadata = dict[str, dict[str, JSONValue]] + + +class CamelBaseModel(BaseModel, ABC): + model_config = ConfigDict(alias_generator=to_camel, populate_by_name=True, extra='forbid') diff --git a/pydantic_ai_slim/pydantic_ai/vercel_ai_elements/request_types.py b/pydantic_ai_slim/pydantic_ai/vercel_ai_elements/request_types.py new file mode 100644 index 0000000000..d7dd60acb6 --- /dev/null +++ b/pydantic_ai_slim/pydantic_ai/vercel_ai_elements/request_types.py @@ -0,0 +1,273 @@ +"""Convert to Python from. + +https://github.com/vercel/ai/blob/ai%405.0.34/packages/ai/src/ui/ui-messages.ts + +Mostly with Claude. +""" + +from typing import Annotated, Any, Literal + +from pydantic import Discriminator, TypeAdapter + +from ._utils import CamelBaseModel, ProviderMetadata + + +class TextUIPart(CamelBaseModel): + """A text part of a message.""" + + type: Literal['text'] = 'text' + + text: str + """The text content.""" + + state: Literal['streaming', 'done'] | None = None + """The state of the text part.""" + + provider_metadata: ProviderMetadata | None = None + """The provider metadata.""" + + +class ReasoningUIPart(CamelBaseModel): + """A reasoning part of a message.""" + + type: Literal['reasoning'] = 'reasoning' + + text: str + """The reasoning text.""" + + state: Literal['streaming', 'done'] | None = None + """The state of the reasoning part.""" + + provider_metadata: ProviderMetadata | None = None + """The provider metadata.""" + + +class SourceUrlUIPart(CamelBaseModel): + """A source part of a message.""" + + type: Literal['source-url'] = 'source-url' + source_id: str + url: str + title: str | None = None + provider_metadata: ProviderMetadata | None = None + + +class SourceDocumentUIPart(CamelBaseModel): + """A document source part of a message.""" + + type: Literal['source-document'] = 'source-document' + source_id: str + media_type: str + title: str + filename: str | None = None + provider_metadata: ProviderMetadata | None = None + + +class FileUIPart(CamelBaseModel): + """A file part of a message.""" + + type: Literal['file'] = 'file' + + media_type: str + """ + IANA media type of the file. + + @see https://www.iana.org/assignments/media-types/media-types.xhtml + """ + + filename: str | None = None + """Optional filename of the file.""" + + url: str + """ + The URL of the file. + It can either be a URL to a hosted file or a [Data URL](https://developer.mozilla.org/en-US/docs/Web/HTTP/Basics_of_HTTP/Data_URLs). 
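+
+    For example, a hosted URL such as `https://example.com/report.pdf` or an
+    inline Data URL such as `data:text/plain;base64,SGVsbG8=` (both values
+    are illustrative).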
+ """ + + provider_metadata: ProviderMetadata | None = None + """The provider metadata.""" + + +class StepStartUIPart(CamelBaseModel): + """A step boundary part of a message.""" + + type: Literal['step-start'] = 'step-start' + + +class DataUIPart(CamelBaseModel): + """Data part with dynamic type based on data name.""" + + type: str # Will be f"data-{NAME}" + id: str | None = None + data: Any + + +# Tool part states as separate models +class ToolInputStreamingPart(CamelBaseModel): + """Tool part in input-streaming state.""" + + type: str # Will be f"tool-{NAME}" + tool_call_id: str + state: Literal['input-streaming'] = 'input-streaming' + input: Any | None = None + provider_executed: bool | None = None + + +class ToolInputAvailablePart(CamelBaseModel): + """Tool part in input-available state.""" + + type: str # Will be f"tool-{NAME}" + tool_call_id: str + state: Literal['input-available'] = 'input-available' + input: Any + provider_executed: bool | None = None + call_provider_metadata: ProviderMetadata | None = None + + +class ToolOutputAvailablePart(CamelBaseModel): + """Tool part in output-available state.""" + + type: str # Will be f"tool-{NAME}" + tool_call_id: str + state: Literal['output-available'] = 'output-available' + input: Any + output: Any + provider_executed: bool | None = None + call_provider_metadata: ProviderMetadata | None = None + preliminary: bool | None = None + + +class ToolOutputErrorPart(CamelBaseModel): + """Tool part in output-error state.""" + + type: str # Will be f"tool-{NAME}" + tool_call_id: str + state: Literal['output-error'] = 'output-error' + input: Any | None = None + raw_input: Any | None = None + error_text: str + provider_executed: bool | None = None + call_provider_metadata: ProviderMetadata | None = None + + +# Union of all tool part states +ToolUIPart = ToolInputStreamingPart | ToolInputAvailablePart | ToolOutputAvailablePart | ToolOutputErrorPart + + +# Dynamic tool part states as separate models +class DynamicToolInputStreamingPart(CamelBaseModel): + """Dynamic tool part in input-streaming state.""" + + type: Literal['dynamic-tool'] = 'dynamic-tool' + tool_name: str + tool_call_id: str + state: Literal['input-streaming'] = 'input-streaming' + input: Any | None = None + + +class DynamicToolInputAvailablePart(CamelBaseModel): + """Dynamic tool part in input-available state.""" + + type: Literal['dynamic-tool'] = 'dynamic-tool' + tool_name: str + tool_call_id: str + state: Literal['input-available'] = 'input-available' + input: Any + call_provider_metadata: ProviderMetadata | None = None + + +class DynamicToolOutputAvailablePart(CamelBaseModel): + """Dynamic tool part in output-available state.""" + + type: Literal['dynamic-tool'] = 'dynamic-tool' + tool_name: str + tool_call_id: str + state: Literal['output-available'] = 'output-available' + input: Any + output: Any + call_provider_metadata: ProviderMetadata | None = None + preliminary: bool | None = None + + +class DynamicToolOutputErrorPart(CamelBaseModel): + """Dynamic tool part in output-error state.""" + + type: Literal['dynamic-tool'] = 'dynamic-tool' + tool_name: str + tool_call_id: str + state: Literal['output-error'] = 'output-error' + input: Any + error_text: str + call_provider_metadata: ProviderMetadata | None = None + + +# Union of all dynamic tool part states +DynamicToolUIPart = ( + DynamicToolInputStreamingPart + | DynamicToolInputAvailablePart + | DynamicToolOutputAvailablePart + | DynamicToolOutputErrorPart +) + + +UIMessagePart = ( + TextUIPart + | ReasoningUIPart + | ToolUIPart + 
| DynamicToolUIPart + | SourceUrlUIPart + | SourceDocumentUIPart + | FileUIPart + | DataUIPart + | StepStartUIPart +) +"""Union of all message part types.""" + + +class UIMessage(CamelBaseModel): + """A message as displayed in the UI by Vercel AI Elements.""" + + id: str + """A unique identifier for the message.""" + + role: Literal['system', 'user', 'assistant'] + """The role of the message.""" + + metadata: Any | None = None + """The metadata of the message.""" + + parts: list[UIMessagePart] + """ + The parts of the message. Use this for rendering the message in the UI. + + System messages should be avoided (set the system prompt on the server instead). + They can have text parts. + + User messages can have text parts and file parts. + + Assistant messages can have text, reasoning, tool invocation, and file parts. + """ + + +class SubmitMessage(CamelBaseModel): + """Submit a message to the agent.""" + + trigger: Literal['submit-message'] + id: str + messages: list[UIMessage] + + model: str + web_search: bool + + +class RegenerateMessage(CamelBaseModel): + """Ask the agent to regenerate a message.""" + + trigger: Literal['regenerate-message'] + id: str + messages: list[UIMessage] + message_id: str + + +RequestData = SubmitMessage | RegenerateMessage +request_data_schema: TypeAdapter[RequestData] = TypeAdapter(Annotated[RequestData, Discriminator('trigger')]) diff --git a/pydantic_ai_slim/pydantic_ai/vercel_ai_elements/response_stream.py b/pydantic_ai_slim/pydantic_ai/vercel_ai_elements/response_stream.py new file mode 100644 index 0000000000..82da300014 --- /dev/null +++ b/pydantic_ai_slim/pydantic_ai/vercel_ai_elements/response_stream.py @@ -0,0 +1,141 @@ +from __future__ import annotations as _annotations + +from collections.abc import AsyncIterator +from dataclasses import dataclass, field +from typing import Any +from uuid import uuid4 + +from pydantic_core import to_json + +from .. import messages +from ..agent import Agent +from ..run import AgentRunResultEvent +from ..tools import AgentDepsT +from . import response_types as _t + +__all__ = 'sse_stream', 'VERCEL_AI_ELEMENTS_HEADERS', 'EventStreamer' +# no idea if this is important, but vercel sends it, therefore so am I +VERCEL_AI_ELEMENTS_HEADERS = {'x-vercel-ai-ui-message-stream': 'v1'} + + +async def sse_stream(agent: Agent[AgentDepsT], user_prompt: str, deps: Any) -> AsyncIterator[str]: + """Stream events from an agent run as Vercel AI Elements events. + + Args: + agent: The agent to run. + user_prompt: The user prompt to run the agent with. + deps: The dependencies to pass to the agent. + + Yields: + An async iterator text lines to stream over SSE. + """ + event_streamer = EventStreamer() + async for event in agent.run_stream_events(user_prompt, deps=deps): + if not isinstance(event, AgentRunResultEvent): + async for chunk in event_streamer.event_to_chunks(event): + yield chunk.sse() + async for chunk in event_streamer.finish(): + yield chunk.sse() + + +@dataclass +class EventStreamer: + """Logic for mapping pydantic-ai events to Vercel AI Elements events which can be streamed to a client over SSE.""" + + message_id: str = field(default_factory=lambda: uuid4().hex) + _final_result_tool_id: str | None = field(default=None, init=False) + + async def event_to_chunks(self, event: messages.AgentStreamEvent) -> AsyncIterator[_t.AbstractSSEChunk]: # noqa C901 + """Convert pydantic-ai events to Vercel AI Elements events which can be streamed to a client over SSE. + + Args: + event: The pydantic-ai event to convert. 
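+                Text parts map to `text-*` chunks, thinking parts to `reasoning-*`
+                chunks, and tool calls and tool results to `tool-input-*` and
+                `tool-output-available` chunks respectively.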
+ + Yields: + An async iterator of Vercel AI Elements events. + """ + match event: + case messages.PartStartEvent(part=part): + match part: + case messages.TextPart(content=content): + yield _t.TextStartChunk(id=self.message_id) + yield _t.TextDeltaChunk(id=self.message_id, delta=content) + case ( + messages.ToolCallPart(tool_name=tool_name, tool_call_id=tool_call_id, args=args) + | messages.BuiltinToolCallPart(tool_name=tool_name, tool_call_id=tool_call_id, args=args) + ): + yield _t.ToolInputStartChunk(tool_call_id=tool_call_id, tool_name=tool_name) + if isinstance(args, str): + yield _t.ToolInputDeltaChunk(tool_call_id=tool_call_id, input_text_delta=args) + elif args is not None: + yield ( + _t.ToolInputDeltaChunk(tool_call_id=tool_call_id, input_text_delta=_json_dumps(args)) + ) + + case messages.BuiltinToolReturnPart( + tool_name=tool_name, tool_call_id=tool_call_id, content=content + ): + yield _t.ToolOutputAvailableChunk(tool_call_id=tool_call_id, output=content) + + case messages.ThinkingPart(content=content): + yield _t.ReasoningStartChunk(id=self.message_id) + yield _t.ReasoningDeltaChunk(id=self.message_id, delta=content) + + case messages.PartDeltaEvent(delta=delta): + match delta: + case messages.TextPartDelta(content_delta=content_delta): + yield _t.TextDeltaChunk(id=self.message_id, delta=content_delta) + case messages.ThinkingPartDelta(content_delta=content_delta): + if content_delta: + yield _t.ReasoningDeltaChunk(id=self.message_id, delta=content_delta) + case messages.ToolCallPartDelta(args_delta=args, tool_call_id=tool_call_id): + tool_call_id = tool_call_id or '' + if isinstance(args, str): + yield _t.ToolInputDeltaChunk(tool_call_id=tool_call_id, input_text_delta=args) + elif args is not None: + yield ( + _t.ToolInputDeltaChunk(tool_call_id=tool_call_id, input_text_delta=_json_dumps(args)) + ) + case messages.FinalResultEvent(tool_name=tool_name, tool_call_id=tool_call_id): + if tool_call_id and tool_name: + self._final_result_tool_id = tool_call_id + yield _t.ToolInputStartChunk(tool_call_id=tool_call_id, tool_name=tool_name) + case messages.FunctionToolCallEvent(): + pass + # print(f'TODO FunctionToolCallEvent {part}') + case messages.FunctionToolResultEvent(result=result): + match result: + case messages.ToolReturnPart(tool_name=tool_name, tool_call_id=tool_call_id, content=content): + yield _t.ToolOutputAvailableChunk(tool_call_id=tool_call_id, output=content) + case messages.RetryPromptPart(tool_name=tool_name, tool_call_id=tool_call_id, content=content): + yield _t.ToolOutputAvailableChunk(tool_call_id=tool_call_id, output=content) + case messages.BuiltinToolCallEvent(part=part): + tool_call_id = part.tool_call_id + tool_name = part.tool_name + args = part.args + yield _t.ToolInputStartChunk(tool_call_id=tool_call_id, tool_name=tool_name) + if isinstance(args, str): + yield _t.ToolInputDeltaChunk(tool_call_id=tool_call_id, input_text_delta=args) + elif args is not None: + yield _t.ToolInputDeltaChunk(tool_call_id=tool_call_id, input_text_delta=_json_dumps(args)) + case messages.BuiltinToolResultEvent(result=result): + yield _t.ToolOutputAvailableChunk(tool_call_id=result.tool_call_id, output=result.content) + + async def finish(self) -> AsyncIterator[_t.AbstractSSEChunk | DoneChunk]: + """Send extra messages required to close off the stream.""" + if tool_call_id := self._final_result_tool_id: + yield _t.ToolOutputAvailableChunk(tool_call_id=tool_call_id, output=None) + yield _t.FinishChunk() + yield DoneChunk() + + +class DoneChunk: + def sse(self) -> str: + 
return '[DONE]' + + def __str__(self) -> str: + return 'DoneChunk' + + +def _json_dumps(obj: Any) -> str: + return to_json(obj).decode('utf-8') diff --git a/pydantic_ai_slim/pydantic_ai/vercel_ai_elements/response_types.py b/pydantic_ai_slim/pydantic_ai/vercel_ai_elements/response_types.py new file mode 100644 index 0000000000..8538dd9680 --- /dev/null +++ b/pydantic_ai_slim/pydantic_ai/vercel_ai_elements/response_types.py @@ -0,0 +1,216 @@ +"""Convert to Python from. + +https://github.com/vercel/ai/blob/ai%405.0.34/packages/ai/src/ui/ui-messages.ts + +Mostly with Claude. +""" + +from typing import Any, Literal + +from ._utils import CamelBaseModel, ProviderMetadata + + +class AbstractSSEChunk(CamelBaseModel): + """Abstract base class for response SSE even.""" + + def sse(self) -> str: + return self.model_dump_json(exclude_none=True, by_alias=True) + + +class TextStartChunk(AbstractSSEChunk): + """Text start chunk.""" + + type: Literal['text-start'] = 'text-start' + id: str + provider_metadata: ProviderMetadata | None = None + + +class TextDeltaChunk(AbstractSSEChunk): + """Text delta chunk.""" + + type: Literal['text-delta'] = 'text-delta' + delta: str + id: str + provider_metadata: ProviderMetadata | None = None + + +class TextEndChunk(AbstractSSEChunk): + """Text end chunk.""" + + type: Literal['text-end'] = 'text-end' + id: str + provider_metadata: ProviderMetadata | None = None + + +class ReasoningStartChunk(AbstractSSEChunk): + """Reasoning start chunk.""" + + type: Literal['reasoning-start'] = 'reasoning-start' + id: str + provider_metadata: ProviderMetadata | None = None + + +class ReasoningDeltaChunk(AbstractSSEChunk): + """Reasoning delta chunk.""" + + type: Literal['reasoning-delta'] = 'reasoning-delta' + id: str + delta: str + provider_metadata: ProviderMetadata | None = None + + +class ReasoningEndChunk(AbstractSSEChunk): + """Reasoning end chunk.""" + + type: Literal['reasoning-end'] = 'reasoning-end' + id: str + provider_metadata: ProviderMetadata | None = None + + +class ErrorChunk(AbstractSSEChunk): + """Error chunk.""" + + type: Literal['error'] = 'error' + error_text: str + + +class ToolInputAvailableChunk(AbstractSSEChunk): + """Tool input available chunk.""" + + type: Literal['tool-input-available'] = 'tool-input-available' + tool_call_id: str + tool_name: str + input: Any + provider_executed: bool | None = None + provider_metadata: ProviderMetadata | None = None + dynamic: bool | None = None + + +class ToolInputErrorChunk(AbstractSSEChunk): + """Tool input error chunk.""" + + type: Literal['tool-input-error'] = 'tool-input-error' + tool_call_id: str + tool_name: str + input: Any + provider_executed: bool | None = None + provider_metadata: ProviderMetadata | None = None + dynamic: bool | None = None + error_text: str + + +class ToolOutputAvailableChunk(AbstractSSEChunk): + """Tool output available chunk.""" + + type: Literal['tool-output-available'] = 'tool-output-available' + tool_call_id: str + output: Any + provider_executed: bool | None = None + dynamic: bool | None = None + preliminary: bool | None = None + + +class ToolOutputErrorChunk(AbstractSSEChunk): + """Tool output error chunk.""" + + type: Literal['tool-output-error'] = 'tool-output-error' + tool_call_id: str + error_text: str + provider_executed: bool | None = None + dynamic: bool | None = None + + +class ToolInputStartChunk(AbstractSSEChunk): + """Tool input start chunk.""" + + type: Literal['tool-input-start'] = 'tool-input-start' + tool_call_id: str + tool_name: str + provider_executed: bool | None = 
None + dynamic: bool | None = None + + +class ToolInputDeltaChunk(AbstractSSEChunk): + """Tool input delta chunk.""" + + type: Literal['tool-input-delta'] = 'tool-input-delta' + tool_call_id: str + input_text_delta: str + + +# Source chunk types +class SourceUrlChunk(AbstractSSEChunk): + """Source URL chunk.""" + + type: Literal['source-url'] = 'source-url' + source_id: str + url: str + title: str | None = None + provider_metadata: ProviderMetadata | None = None + + +class SourceDocumentChunk(AbstractSSEChunk): + """Source document chunk.""" + + type: Literal['source-document'] = 'source-document' + source_id: str + media_type: str + title: str + filename: str | None = None + provider_metadata: ProviderMetadata | None = None + + +class FileChunk(AbstractSSEChunk): + """File chunk.""" + + type: Literal['file'] = 'file' + url: str + media_type: str + + +class DataUIMessageChunk(AbstractSSEChunk): + """Data UI message chunk with dynamic type.""" + + type: str # Will be f"data-{NAME}" + data: Any + + +class StartStepChunk(AbstractSSEChunk): + """Start step chunk.""" + + type: Literal['start-step'] = 'start-step' + + +class FinishStepChunk(AbstractSSEChunk): + """Finish step chunk.""" + + type: Literal['finish-step'] = 'finish-step' + + +# Message lifecycle chunk types +class StartChunk(AbstractSSEChunk): + """Start chunk.""" + + type: Literal['start'] = 'start' + message_id: str | None = None + message_metadata: Any | None = None + + +class FinishChunk(AbstractSSEChunk): + """Finish chunk.""" + + type: Literal['finish'] = 'finish' + message_metadata: Any | None = None + + +class AbortChunk(AbstractSSEChunk): + """Abort chunk.""" + + type: Literal['abort'] = 'abort' + + +class MessageMetadataChunk(AbstractSSEChunk): + """Message metadata chunk.""" + + type: Literal['message-metadata'] = 'message-metadata' + message_metadata: Any diff --git a/pydantic_ai_slim/pydantic_ai/vercel_ai_elements/starlette.py b/pydantic_ai_slim/pydantic_ai/vercel_ai_elements/starlette.py new file mode 100644 index 0000000000..59868afeef --- /dev/null +++ b/pydantic_ai_slim/pydantic_ai/vercel_ai_elements/starlette.py @@ -0,0 +1,69 @@ +from dataclasses import dataclass +from typing import Generic + +from pydantic import ValidationError + +from ..agent import Agent +from ..tools import AgentDepsT +from .request_types import RequestData, TextUIPart, request_data_schema +from .response_stream import VERCEL_AI_ELEMENTS_HEADERS, sse_stream + +try: + from sse_starlette.sse import EventSourceResponse + from starlette.requests import Request + from starlette.responses import JSONResponse, Response +except ImportError as e: + raise ImportError('To use Vercel AI Elements, please install starlette and sse_starlette') from e + + +@dataclass +class StarletteChat(Generic[AgentDepsT]): + """Starlette support for Pydantic AI's Vercel AI Elements integration. + + This can be used with either FastAPI or Starlette apps. + """ + + agent: Agent[AgentDepsT] + + async def dispatch_request(self, request: Request, deps: AgentDepsT) -> Response: + """Handle a request and return a streamed SSE response. + + Args: + request: The incoming Starlette/FastAPI request. + deps: The dependencies for the agent. + + Returns: + A streamed SSE response. 
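+
+        Example (a hypothetical FastAPI wiring; `chat_agent` and `my_deps` are
+        assumptions standing in for your own agent and dependencies):
+
+        ```python
+        import fastapi
+        from starlette.requests import Request
+        from starlette.responses import Response
+
+        app = fastapi.FastAPI()
+        starlette_chat = StarletteChat(chat_agent)  # chat_agent: your Agent instance
+
+        @app.post('/api/chat')
+        async def chat(request: Request) -> Response:
+            return await starlette_chat.dispatch_request(request, deps=my_deps)
+        ```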
+ """ + body = await request.body() + try: + data = request_data_schema.validate_json(body) + except ValidationError as e: + return JSONResponse({'errors': e.errors()}, status_code=422) + else: + return await self.handle_request_data(data, deps) + + async def handle_request_data(self, data: RequestData, deps: AgentDepsT) -> Response: + """Handle request data that has already been validated and return a streamed SSE response. + + Args: + data: The validated request data. + deps: The dependencies for the agent. + + Returns: + A streamed SSE response. + """ + if not data.messages: + return JSONResponse({'errors': 'no messages provided'}) + + message = data.messages[-1] + prompt: list[str] = [] + for part in message.parts: + if isinstance(part, TextUIPart): + prompt.append(part.text) + else: + return JSONResponse({'errors': 'only text parts are supported yet'}) + + return EventSourceResponse( + sse_stream(self.agent, '\n'.join(prompt), deps=deps), headers=VERCEL_AI_ELEMENTS_HEADERS + ) From 0018e116e5dfb4d22f42ff2771f6dec5d2a71a2f Mon Sep 17 00:00:00 2001 From: Samuel Colvin Date: Tue, 16 Sep 2025 11:26:24 -0700 Subject: [PATCH 02/10] fix sqlite --- examples/pydantic_ai_examples/chat_app.py | 10 ++++++++-- examples/pydantic_ai_examples/sqlite_database.py | 4 ++-- 2 files changed, 10 insertions(+), 4 deletions(-) diff --git a/examples/pydantic_ai_examples/chat_app.py b/examples/pydantic_ai_examples/chat_app.py index 95bf228571..39691827af 100644 --- a/examples/pydantic_ai_examples/chat_app.py +++ b/examples/pydantic_ai_examples/chat_app.py @@ -47,7 +47,13 @@ class Deps: chat_agent = Agent( - 'openai:gpt-4.1', instructions='You are a helpful assistant.', deps_type=Deps + 'openai:gpt-4.1', + deps_type=Deps, + instructions=""" +You are a helpful assistant. + +Always reply with markdown. ALWAYS use code fences for code examples and lines of code. 
+""", ) @@ -71,7 +77,7 @@ async def retrieve_memories(ctx: RunContext[Deps], memory_contains: str) -> str: ctx.deps.user_id, f'%{memory_contains}%', ) - return '\n'.join([row['value'] for row in rows]) + return '\n'.join([row[0] for row in rows]) starlette_chat = StarletteChat(chat_agent) diff --git a/examples/pydantic_ai_examples/sqlite_database.py b/examples/pydantic_ai_examples/sqlite_database.py index 491e5020dd..9d470a937a 100644 --- a/examples/pydantic_ai_examples/sqlite_database.py +++ b/examples/pydantic_ai_examples/sqlite_database.py @@ -57,10 +57,10 @@ async def execute(self, sql: LiteralString, *args: Any, commit: bool = False): if commit: await self._asyncify(self.con.commit) - async def fetchall(self, sql: LiteralString, *args: Any) -> list[dict[str, Any]]: + async def fetchall(self, sql: LiteralString, *args: Any) -> list[tuple[str, ...]]: c = await self._asyncify(self._execute, sql, *args) rows = await self._asyncify(c.fetchall) - return [dict(row) for row in rows] + return [tuple(row) for row in rows] def _execute( self, sql: LiteralString, *args: Any, commit: bool = False From bdd321d1c4403729d76c9be31c2cd74778724391 Mon Sep 17 00:00:00 2001 From: Douwe Maan Date: Wed, 8 Oct 2025 12:01:13 +0000 Subject: [PATCH 03/10] refactoring --- examples/pydantic_ai_examples/chat_app.py | 2 +- .../__init__.py | 0 .../_utils.py | 0 .../request_types.py | 11 +- .../response_stream.py | 31 +- .../response_types.py | 0 .../pydantic_ai/vercel_ai/starlette.py | 85 ++ .../vercel_ai_elements/starlette.py | 69 -- tests/cassettes/test_vercel_ai/test_run.yaml | 849 ++++++++++++++++++ tests/test_ag_ui.py | 1 - tests/test_vercel_ai.py | 820 +++++++++++++++++ 11 files changed, 1767 insertions(+), 101 deletions(-) rename pydantic_ai_slim/pydantic_ai/{vercel_ai_elements => vercel_ai}/__init__.py (100%) rename pydantic_ai_slim/pydantic_ai/{vercel_ai_elements => vercel_ai}/_utils.py (100%) rename pydantic_ai_slim/pydantic_ai/{vercel_ai_elements => vercel_ai}/request_types.py (95%) rename pydantic_ai_slim/pydantic_ai/{vercel_ai_elements => vercel_ai}/response_stream.py (84%) rename pydantic_ai_slim/pydantic_ai/{vercel_ai_elements => vercel_ai}/response_types.py (100%) create mode 100644 pydantic_ai_slim/pydantic_ai/vercel_ai/starlette.py delete mode 100644 pydantic_ai_slim/pydantic_ai/vercel_ai_elements/starlette.py create mode 100644 tests/cassettes/test_vercel_ai/test_run.yaml create mode 100644 tests/test_vercel_ai.py diff --git a/examples/pydantic_ai_examples/chat_app.py b/examples/pydantic_ai_examples/chat_app.py index 39691827af..4e2f29ddf7 100644 --- a/examples/pydantic_ai_examples/chat_app.py +++ b/examples/pydantic_ai_examples/chat_app.py @@ -16,7 +16,7 @@ from fastapi import Depends, Request, Response from pydantic_ai import Agent, RunContext -from pydantic_ai.vercel_ai_elements.starlette import StarletteChat +from pydantic_ai.vercel_ai.starlette import StarletteChat from .sqlite_database import Database diff --git a/pydantic_ai_slim/pydantic_ai/vercel_ai_elements/__init__.py b/pydantic_ai_slim/pydantic_ai/vercel_ai/__init__.py similarity index 100% rename from pydantic_ai_slim/pydantic_ai/vercel_ai_elements/__init__.py rename to pydantic_ai_slim/pydantic_ai/vercel_ai/__init__.py diff --git a/pydantic_ai_slim/pydantic_ai/vercel_ai_elements/_utils.py b/pydantic_ai_slim/pydantic_ai/vercel_ai/_utils.py similarity index 100% rename from pydantic_ai_slim/pydantic_ai/vercel_ai_elements/_utils.py rename to pydantic_ai_slim/pydantic_ai/vercel_ai/_utils.py diff --git 
a/pydantic_ai_slim/pydantic_ai/vercel_ai_elements/request_types.py b/pydantic_ai_slim/pydantic_ai/vercel_ai/request_types.py similarity index 95% rename from pydantic_ai_slim/pydantic_ai/vercel_ai_elements/request_types.py rename to pydantic_ai_slim/pydantic_ai/vercel_ai/request_types.py index d7dd60acb6..4753ca90f5 100644 --- a/pydantic_ai_slim/pydantic_ai/vercel_ai_elements/request_types.py +++ b/pydantic_ai_slim/pydantic_ai/vercel_ai/request_types.py @@ -249,15 +249,16 @@ class UIMessage(CamelBaseModel): """ -class SubmitMessage(CamelBaseModel): +class SubmitMessage(CamelBaseModel, extra='allow'): """Submit a message to the agent.""" trigger: Literal['submit-message'] id: str messages: list[UIMessage] - model: str - web_search: bool + # TODO (DouweM): Update, make variable? I like `inference_params` from OpenAI ChatKit. + # model: str + # web_search: bool class RegenerateMessage(CamelBaseModel): @@ -269,5 +270,5 @@ class RegenerateMessage(CamelBaseModel): message_id: str -RequestData = SubmitMessage | RegenerateMessage -request_data_schema: TypeAdapter[RequestData] = TypeAdapter(Annotated[RequestData, Discriminator('trigger')]) +RequestData = Annotated[SubmitMessage | RegenerateMessage, Discriminator('trigger')] +request_data_ta: TypeAdapter[RequestData] = TypeAdapter(RequestData) diff --git a/pydantic_ai_slim/pydantic_ai/vercel_ai_elements/response_stream.py b/pydantic_ai_slim/pydantic_ai/vercel_ai/response_stream.py similarity index 84% rename from pydantic_ai_slim/pydantic_ai/vercel_ai_elements/response_stream.py rename to pydantic_ai_slim/pydantic_ai/vercel_ai/response_stream.py index 82da300014..b721192f03 100644 --- a/pydantic_ai_slim/pydantic_ai/vercel_ai_elements/response_stream.py +++ b/pydantic_ai_slim/pydantic_ai/vercel_ai/response_stream.py @@ -8,34 +8,12 @@ from pydantic_core import to_json from .. import messages -from ..agent import Agent -from ..run import AgentRunResultEvent -from ..tools import AgentDepsT from . import response_types as _t -__all__ = 'sse_stream', 'VERCEL_AI_ELEMENTS_HEADERS', 'EventStreamer' -# no idea if this is important, but vercel sends it, therefore so am I -VERCEL_AI_ELEMENTS_HEADERS = {'x-vercel-ai-ui-message-stream': 'v1'} +__all__ = 'VERCEL_AI_DSP_HEADERS', 'EventStreamer' - -async def sse_stream(agent: Agent[AgentDepsT], user_prompt: str, deps: Any) -> AsyncIterator[str]: - """Stream events from an agent run as Vercel AI Elements events. - - Args: - agent: The agent to run. - user_prompt: The user prompt to run the agent with. - deps: The dependencies to pass to the agent. - - Yields: - An async iterator text lines to stream over SSE. 
- """ - event_streamer = EventStreamer() - async for event in agent.run_stream_events(user_prompt, deps=deps): - if not isinstance(event, AgentRunResultEvent): - async for chunk in event_streamer.event_to_chunks(event): - yield chunk.sse() - async for chunk in event_streamer.finish(): - yield chunk.sse() +# See https://ai-sdk.dev/docs/ai-sdk-ui/stream-protocol#data-stream-protocol +VERCEL_AI_DSP_HEADERS = {'x-vercel-ai-ui-message-stream': 'v1'} @dataclass @@ -136,6 +114,9 @@ def sse(self) -> str: def __str__(self) -> str: return 'DoneChunk' + def __eq__(self, other: Any) -> bool: + return isinstance(other, DoneChunk) + def _json_dumps(obj: Any) -> str: return to_json(obj).decode('utf-8') diff --git a/pydantic_ai_slim/pydantic_ai/vercel_ai_elements/response_types.py b/pydantic_ai_slim/pydantic_ai/vercel_ai/response_types.py similarity index 100% rename from pydantic_ai_slim/pydantic_ai/vercel_ai_elements/response_types.py rename to pydantic_ai_slim/pydantic_ai/vercel_ai/response_types.py diff --git a/pydantic_ai_slim/pydantic_ai/vercel_ai/starlette.py b/pydantic_ai_slim/pydantic_ai/vercel_ai/starlette.py new file mode 100644 index 0000000000..62e0a68964 --- /dev/null +++ b/pydantic_ai_slim/pydantic_ai/vercel_ai/starlette.py @@ -0,0 +1,85 @@ +from collections.abc import AsyncIterator +from dataclasses import dataclass +from typing import Generic + +from pydantic import ValidationError + +from ..agent import Agent +from ..run import AgentRunResultEvent +from ..tools import AgentDepsT +from .request_types import RequestData, TextUIPart, request_data_ta +from .response_stream import VERCEL_AI_DSP_HEADERS, DoneChunk, EventStreamer +from .response_types import AbstractSSEChunk + +try: + from sse_starlette.sse import EventSourceResponse + from starlette.requests import Request + from starlette.responses import JSONResponse, Response +except ImportError as e: + raise ImportError('To use Vercel AI Elements, please install starlette and sse_starlette') from e + + +@dataclass +class StarletteChat(Generic[AgentDepsT]): + """Starlette support for Pydantic AI's Vercel AI Elements integration. + + This can be used with either FastAPI or Starlette apps. + """ + + agent: Agent[AgentDepsT] + + async def dispatch_request(self, request: Request, deps: AgentDepsT) -> Response: + """Handle a request and return a streamed SSE response. + + Args: + request: The incoming Starlette/FastAPI request. + deps: The dependencies for the agent. + + Returns: + A streamed SSE response. + """ + try: + data = request_data_ta.validate_json(await request.json()) + + async def run_sse() -> AsyncIterator[str]: + async for chunk in self.run(data, deps=deps): + yield chunk.sse() + + return EventSourceResponse(run_sse(), headers=VERCEL_AI_DSP_HEADERS) + except ValidationError as e: + return JSONResponse({'errors': e.errors()}, status_code=422) + except Exception as e: + return JSONResponse({'errors': str(e)}, status_code=500) + + async def run(self, data: RequestData, deps: AgentDepsT = None) -> AsyncIterator[AbstractSSEChunk | DoneChunk]: + """Stream events from an agent run as Vercel AI Elements events. + + Args: + data: The data to run the agent with. + deps: The dependencies to pass to the agent. + + Yields: + An async iterator text lines to stream over SSE. 
+ """ + # TODO (DouweM): Use .model and .builtin_tools + + # TODO: Use entire message history + + if not data.messages: + raise ValueError('no messages provided') + + message = data.messages[-1] + prompt: list[str] = [] + for part in message.parts: + if isinstance(part, TextUIPart): + prompt.append(part.text) + else: + raise ValueError(f'Only text parts are supported yet, got {part}') + + event_streamer = EventStreamer() + async for event in self.agent.run_stream_events('\n'.join(prompt), deps=deps): + if not isinstance(event, AgentRunResultEvent): + async for chunk in event_streamer.event_to_chunks(event): + yield chunk + async for chunk in event_streamer.finish(): + yield chunk diff --git a/pydantic_ai_slim/pydantic_ai/vercel_ai_elements/starlette.py b/pydantic_ai_slim/pydantic_ai/vercel_ai_elements/starlette.py deleted file mode 100644 index 59868afeef..0000000000 --- a/pydantic_ai_slim/pydantic_ai/vercel_ai_elements/starlette.py +++ /dev/null @@ -1,69 +0,0 @@ -from dataclasses import dataclass -from typing import Generic - -from pydantic import ValidationError - -from ..agent import Agent -from ..tools import AgentDepsT -from .request_types import RequestData, TextUIPart, request_data_schema -from .response_stream import VERCEL_AI_ELEMENTS_HEADERS, sse_stream - -try: - from sse_starlette.sse import EventSourceResponse - from starlette.requests import Request - from starlette.responses import JSONResponse, Response -except ImportError as e: - raise ImportError('To use Vercel AI Elements, please install starlette and sse_starlette') from e - - -@dataclass -class StarletteChat(Generic[AgentDepsT]): - """Starlette support for Pydantic AI's Vercel AI Elements integration. - - This can be used with either FastAPI or Starlette apps. - """ - - agent: Agent[AgentDepsT] - - async def dispatch_request(self, request: Request, deps: AgentDepsT) -> Response: - """Handle a request and return a streamed SSE response. - - Args: - request: The incoming Starlette/FastAPI request. - deps: The dependencies for the agent. - - Returns: - A streamed SSE response. - """ - body = await request.body() - try: - data = request_data_schema.validate_json(body) - except ValidationError as e: - return JSONResponse({'errors': e.errors()}, status_code=422) - else: - return await self.handle_request_data(data, deps) - - async def handle_request_data(self, data: RequestData, deps: AgentDepsT) -> Response: - """Handle request data that has already been validated and return a streamed SSE response. - - Args: - data: The validated request data. - deps: The dependencies for the agent. - - Returns: - A streamed SSE response. 
- """ - if not data.messages: - return JSONResponse({'errors': 'no messages provided'}) - - message = data.messages[-1] - prompt: list[str] = [] - for part in message.parts: - if isinstance(part, TextUIPart): - prompt.append(part.text) - else: - return JSONResponse({'errors': 'only text parts are supported yet'}) - - return EventSourceResponse( - sse_stream(self.agent, '\n'.join(prompt), deps=deps), headers=VERCEL_AI_ELEMENTS_HEADERS - ) diff --git a/tests/cassettes/test_vercel_ai/test_run.yaml b/tests/cassettes/test_vercel_ai/test_run.yaml new file mode 100644 index 0000000000..f4bd74e5f6 --- /dev/null +++ b/tests/cassettes/test_vercel_ai/test_run.yaml @@ -0,0 +1,849 @@ +interactions: +- request: + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + connection: + - keep-alive + content-length: + - '301' + content-type: + - application/json + host: + - api.openai.com + method: POST + parsed_body: + include: + - reasoning.encrypted_content + input: + - content: How do I get FastAPI instrumentation to include the HTTP request and response + role: user + model: gpt-5 + previous_response_id: null + stream: true + tool_choice: auto + tools: + - search_context_size: medium + type: web_search + uri: https://api.openai.com/v1/responses + response: + body: + string: |+ + event: response.created + data: {"type":"response.created","sequence_number":0,"response":{"id":"resp_00e767404995b9950068e647f092888191843d25c5285ef895","object":"response","created_at":1759922160,"status":"in_progress","background":false,"error":null,"incomplete_details":null,"instructions":null,"max_output_tokens":null,"max_tool_calls":null,"model":"gpt-5-2025-08-07","output":[],"parallel_tool_calls":true,"previous_response_id":null,"prompt_cache_key":null,"reasoning":{"effort":"medium","summary":null},"safety_identifier":null,"service_tier":"auto","store":true,"temperature":1.0,"text":{"format":{"type":"text"},"verbosity":"medium"},"tool_choice":"auto","tools":[{"type":"web_search","filters":null,"search_context_size":"medium","user_location":{"type":"approximate","city":null,"country":"US","region":null,"timezone":null}}],"top_logprobs":0,"top_p":1.0,"truncation":"disabled","usage":null,"user":null,"metadata":{}}} + + event: response.in_progress + data: {"type":"response.in_progress","sequence_number":1,"response":{"id":"resp_00e767404995b9950068e647f092888191843d25c5285ef895","object":"response","created_at":1759922160,"status":"in_progress","background":false,"error":null,"incomplete_details":null,"instructions":null,"max_output_tokens":null,"max_tool_calls":null,"model":"gpt-5-2025-08-07","output":[],"parallel_tool_calls":true,"previous_response_id":null,"prompt_cache_key":null,"reasoning":{"effort":"medium","summary":null},"safety_identifier":null,"service_tier":"auto","store":true,"temperature":1.0,"text":{"format":{"type":"text"},"verbosity":"medium"},"tool_choice":"auto","tools":[{"type":"web_search","filters":null,"search_context_size":"medium","user_location":{"type":"approximate","city":null,"country":"US","region":null,"timezone":null}}],"top_logprobs":0,"top_p":1.0,"truncation":"disabled","usage":null,"user":null,"metadata":{}}} + + event: response.output_item.added + data: 
{"type":"response.output_item.added","sequence_number":2,"output_index":0,"item":{"id":"rs_00e767404995b9950068e647f10d8c819187515d1b2517b059","type":"reasoning","encrypted_content":"gAAAAABo5kfxLc0mW1zaOXGjnG239Cj0OLbDS4R2vZP9Qkm4_OcqmYRLMgU_KvshspEnG73vwwg0ZJmuTP56pmAg1NYvUTVIGdfQ3vXswO7VsH0mbZYxDgcbGS8638Q4AZNyrd3cYJOa5mJDsVL5Mxu5Y6GmwW3SrDWQv3aNBSnTXRRko0A1B5zWWRsHngMogVlauGY4aUbPPSTtJZucgoLiirHZ5ROWD-NYBVRbCmhWCoZGOUKFCO456KqurwtrhHc_HSEWnr0ohnYkh3gmRy3HWhDopz4lKve7rxtPD8CTb9Bmzss3XogM5KgopyRU1XjA31QXyMNvi5R9gC90FMokaWnHbea3MUPsJE2dw_wJ6NCYkEf8YqVMinTo9MV9hH82Oq3FamabeE8U9911E_sPl1BNH8wAOQLxm0Zi8Je8ulp1dHoyqZD_vtfzx4Yue0VwuQdhyHj-YMpPEGRWflr_gHePrYsMcM0LUAKyex6FpWRYoJlvJp3kjDi_IwDxMVYCyEjO_oCzjCTrf5XVTylvHXJjec-XDq1p6RsEeuJplVt460cydXFCjAsJ0Xh4ELcIrePipT5cSSQ5ZppDepss1eXHywuYsYoxaOGgXv4b9-_BoJKsdhp5lF0pab6RrA5brx6NF_hTB33CvBMrmbaNUiC88fMQQ6b33UAXzC1gqVf4vQ4r7wgysnfP5xzDZMJMvcvx_TRHWpRhjt0gUJuo-ql6tkgmQJEUiNPHfEmc-AN0_KDqdmT5TIyZ537Oe-TT6ZmWrPB-EzcMYntWorj-HdgYCEUhn1DkomMhEHQ3ILbMVhAeM8upzCqRW1MnqDpzIkj1_lIC","summary":[]}} + + event: response.output_item.done + data: {"type":"response.output_item.done","sequence_number":3,"output_index":0,"item":{"id":"rs_00e767404995b9950068e647f10d8c819187515d1b2517b059","type":"reasoning","encrypted_content":"gAAAAABo5kf5xn6cAv4jZi6LDABpXHtrd1qzU0V68_w3dCOTzg7EVKvB8i56Yah1EC7B4i7Zh3fO1H3Q6sdGS-jULiPLQdtu6TOMlNeZir_mGMVRih89fRk2UBKdwh0YywoHpv7xQLHT0qOtbVxGkzrixkOKe7B-oFGVq7v5Zpz-uD-FsyZCMikyKgVI0MvbbVHHBx8FJULvlVOAKyUQesNtZGnwobMJvUgnOUM_7rPzlC-jpzD1hwURJ93QvSsP1Klpv9Ebuqbxih70HsMmjNqO8PKwRLUzo7IqKGUTarrq0eOZmDird01aNC8ao4paWUE92fEbT8Uzv0QPmWEBjQVUgm_9gL4E0M3XzlWj5hGXrqDBJYNB36ciCJyBNSS4tx2LFkqM0UDg8u8k_7yDdVyL7j-CLz095-jT4IwCRMuKuBjaqIetw42S7xEeTrrIZPGg6oqAgrz715lr1kz8NoO2aDds-RZ780FXcptue64fFy19kRT4tmRlkeUT4oHAlLcJqay-Z9EFtbKBtazKCZyPKDCHiVWQa4tUTlq6SsAvJAkgLMV18qO643EvVhmWsm37Jrb2XaiDydgNQ65olaacS8nH3YuM9xDhGatKuyo8BcQAJdHq6tRHJpSUDG8awLuGckKQ8h2FHPeztkj5ooIwb4EvmHcOTBn1KxpXC4SD6lS24Ob7l9G4VB7udxDrYyFM0d22fh-gdRcd_axy56CTn_wZIyunQwIhQXHAkF-A_tdsrhHLj2V_hQ2ICdPmtZ4ZWr7kGoiGW4RWr75yd3pqzehr64pDXn3HTZ_a7vyMSSvkQFYUZfTRU56cE_09q2MZKfGAd1_X3Tqvh2EJwwXGXi7NwjRt0deK6TUjVmLEFkiRrROGZ1XcFDWoCQs9kmaHUAEWqDKaJJ3Bt6PstG-ZfZlzSouIPpZ_aUzwIuVkYclzT_d6PPY40m304HVHoonODHlNYLYlNaQCeH0th0KAYQmGvt6JE8p5j6UYrYPCT8zXW5YVppuIqMAmT7qOwz1GC1DAFkZNkBVc9QWPeK-38leDqbNzNKUSsBLom1UwyBdJE-6GXX-zPrUr229ak8PT2VrrkiMvKoR9oOqdn0oMlRkCvreOvq7gwUHRy-yhkLtSAEmD2PoK-hPCefS_aDsLs-kscZOY73NrA6bhCAkh5KrUylk3-LKWJLXooZoCvu_ZaG5w8Gi1tBk1F3oa7WdXGeTnl8oxUuDFoiuLF4GASVmGIEMxbrSJhrpkXkZdpWzQAWV6-XK7owSgy8QBkaEy5qu70NwCS90JpvBTSmoe4EDvVwTlgpkCmQrEItynZ-Atma_L1TLHobjJcYQ7muZmFXhforiS7wrga-8oGy9Jch4y93xwLzWwSl6UWBmgqyhbqUiB9wAtituqlulU68WN8iAOaLR-zuPkHJ5TXrGyguReCAwWjEZXFjqtauX7ueoEdb-fKluBVMlivCZS0fXyts47doGzozeKaZg1ahDKxudeoZbGQR3fb6yHeLTM2W2iuk_Tpwp6Rc4Qj6EEFf5k7mTy5W0_cCzVOlv48Qn55OrnhFb8myAqtQ_8-hDt68FVuA-HCfY6KnByNHTBvKLe8Bt9BA9uCVJSXhTytauMucZXvGluISQZFKxEYpwrUkAtI4dWuGUEPaLOGv5oFrtW5-mBLKyinUKhjW0o5FhF8juCWkQ4Tm0IFhQcXasnEUDp3Z--xJl6l-lYurxa9nRrbYco4qbMj6fxHcDcjW4-s_3ZNAHFvgmLJKHvd6dQo8lzwuXw2ZO6I71htJnhPdfo8456tiiH0J7qc5i0jB5CUnttTupJMvGOr5DyrNdGzT9KfoJ4mt970AaIg3NDAsLP0mnZhsVJSUKeLxkoUd64mkhnr8Hjnjbk06QEUmVtyO3ha-jFkCVP2GjwLE2L9gWWQ8pctXuVAtHn2umpoQbcNELKgv9P9rK6eQNTTeVgz0ly8N8-P67SueDSh-Bf5nrUYmc8Yz9hxlYsemremEjftbCpODVpgnpnDINEnH6jELjameMmCJcQDzDIaVLNMM5z71wU_n1RDdWdnBdUKTy0B0Qqds9DHLyP6RGC7NQHO8EirWcpiMtckYDB6M_8QQI_ZQ3ORgmzoWgAOSmiOzlIiT3XNc9G0ts_zZigU4iIBm_BBXYa2PjpO5ijMiZDGybqrpzuNChKAY9f2XFl2YDt11cR-SUFV56dL0Ebc3BESJQ-1CpeODHqqLPd3iRWzUyyT6UVtUo-x9A70HbnJ142QAo0MvZ5Sd-q-TQVrKDM
UvXZRQzQ3x3NISPPO_EIOXguwa9EjcLRpV3ldSVpNNTo0O2kQh8HTyH16kGw4RjMT7Qn_DJWpe9lXPwQyI_eCzacR_FwiOOUt5H2cgKgAI-TSKnUm7oFm9lTSPf2FB4LmympN96zX9a1o8YsoYKMnuGypCCn65qblBs0ftTswO2gvZrhjWX9MB7ZlAaA5MgHmrKcLb5ICe5XY3BgGouOWBWkjES-tjAogrUXnBAN7l99g5rkAfAfZvL4RX2H4UGvxHqRUrBME2IIUGKCK5joD-PFZL6x-75S7Kj-vLm37UXglF-RiN3AJbrFce7vzbu2GcLgJMwd4GilTycVUNCIKyJypMs9RbkeUDMeO3HIIyjn2zwNIQGWq1ZSL5yrkEzpBXd8RMCwVdpegZYzZwPhGH673xoj4CV5P42QRZ2qJcjjtGf9E35FCqCWb9ogKV-MWYriwKOFmNN4GKGV39aGQj4NHCw7Mwth5sD6WpoE5BaMbMBEfy2uVTfaqT0KZ7fRcgy1hXOm0A4qDSS3zkfsh4h7Oni2fjPPkwuh_DLx2Y9T01sJ1vYd79Gsmxlmzor-TjED3I24phJUgAanOxxcRD8GIojeiRTI4Tgj95q0SfkCzy_DY-wD6IIHjEyL4D7JO0Y7U_UADJN3ZAoKHWlZl3OSO7nw_uskVM5FB7jqqDfr5P0YBMQMwWjVuAibgMnt-9rO4NPBG1xifL1GwcBvFW30cgxE2SGFrOcPqyc8HS6vc55DgUS4kpPOkN8sTOP8wBT2_jEKzRde3ByBDa7Dyx1hAuet_huuNmYU6mGRcmKCqWc6qW6FJH4sjLSvUW9UoRDMn3ZpVBsHVtdiQAB8RlegtJNyr61gexss0Qla4InT65EbkB1carYnCdeQGyyMUKk-YD79CapD_BtRnMJ5twCFampVIiQn_tANwdMEX9NFGgQgnczJ3MzbrEHHA3iBhapWxWD65FEm2zYrDUaeMNm8yH60RuNKyZ7aZRo-sGKrYWMU6bZ4YwuRbqiTNF1NPizcpGrj_2k_hT-TcbtPnPNAMdXzTK9EB2QHfsQqmsqOImtx8q0vekrqCSWjN4H1xRHrYPZMvlvTdtDdhiWI8WSatsPludVXeYxqXxOy2XulPjzkiVtpgvMUwTGH_Yh-kUP5C94ZuZ3WDM93Gy9AsDdaNScSC_m0Ho4inuGpe5b_ctx7MNCdhdzl3PXwl67Fj_0E3qkdmDS6McKk0S2fOmzbvoJmaL7g2g_QVCCpxA-4vmmecak3J5Me_2EW6sHd5Zn1tvcFezrcmQfcZQbax7yoyZk20egkaQqpkli-AKjSUKBmP0aSR7qrkvAe2z4mgl4yP062GlC2YI3r_8xedjsg3jWRHyyJ4D_7Y0rtmAjVwhi1tfl0XK0JCPKVkwTTCBSbwVQo2yR_zc7zIazj9TDm8qFK2G_Q3QuEqz-Cz9VGQu_BPhrMaH1WaB-2ays6IUf3s7_gUavAAynaEM9kGGHj_t8zkyqDTjalRl0JE5T5PxxYdv1G9s3MU4B0yB3H_LhcDyjVAb1WzPZpcAxxrcBdpLVFa36ewDzKRmVmSONJd3fLtB9_HJhXgtFIIRgAKuhmRf1hMAG6qaJSsJNA3HYRZigy15OMe4RgkFSl1aj3ScrJc1-N3Si44OwqRIJGrVV2ihh_euAE0l_ImdW_vEgpRbgfMqBiDmFbxgdcv2cT_MaOMoFX3n_V1GrAuBzjrVQSBbhgau7nWsynSK4h9781waWYMAZK4LrOK0UQ4iyugcpj0JNO9k4WvB-yRR9E2rtIoEC6Nz0=","summary":[]}} + + event: response.output_item.added + data: {"type":"response.output_item.added","sequence_number":4,"output_index":1,"item":{"id":"ws_00e767404995b9950068e647f909248191bfe8d05eeed67645","type":"web_search_call","status":"in_progress"}} + + event: response.web_search_call.in_progress + data: {"type":"response.web_search_call.in_progress","sequence_number":5,"output_index":1,"item_id":"ws_00e767404995b9950068e647f909248191bfe8d05eeed67645"} + + event: response.web_search_call.searching + data: {"type":"response.web_search_call.searching","sequence_number":6,"output_index":1,"item_id":"ws_00e767404995b9950068e647f909248191bfe8d05eeed67645"} + + event: response.web_search_call.completed + data: {"type":"response.web_search_call.completed","sequence_number":7,"output_index":1,"item_id":"ws_00e767404995b9950068e647f909248191bfe8d05eeed67645"} + + event: response.output_item.done + data: {"type":"response.output_item.done","sequence_number":8,"output_index":1,"item":{"id":"ws_00e767404995b9950068e647f909248191bfe8d05eeed67645","type":"web_search_call","status":"completed","action":{"type":"search","query":"OpenTelemetry FastAPI instrumentation capture request and response body"}}} + + event: response.output_item.added + data: 
{"type":"response.output_item.added","sequence_number":9,"output_index":2,"item":{"id":"rs_00e767404995b9950068e647fa69e48191b6f5385a856b2948","type":"reasoning","encrypted_content":"gAAAAABo5kf6RMw3cYGYW7G695hqy-Lc0VhF9ct5CCX103QsnamWXw3ZJIXPr3hTUifg4sJ8liWn3c1CwDSRK_2RGcxOQYtowBwX1a68VoxQxQYrZnmnln1osLOAYnxvlEczg7IvnDJAtllxXHKmpvMO573_A-r1o6Wm_BP7TBMvQzUt9pIH_RCkpFM5xI93ERGaKCWE9RA1dWeXDFUccGx_lOJa7eif13UlaPTjICzlVynuHg4QVejjon89TBQIPprkAliKvQEANSEZMBrEexL1Ta1jU983iS3nyIHnAliDiyZgDWJWpVZDgcbb5keDwIgD2Rq6TFN0y0zVEERUjaAribiUoXSg9rP6-527IycrY9AFQgmTTCU0Pud-A-DJN_IGIK4MATMOOjUnluLnZMTE8pMv1aqlZpJV2zgZORgNucV3mOIeWIjekuO42TmIeLvBmrvkMEt2aZAayDVLysbvtxHoehn59q-rh17eGfDvoTR-2N_MjidI9WjWrBcH42OI9SVGO2Cyw1I7Z78PxPFn012iGJhyfLfT5e1xjUecFDPgRZjVz-Y1j3QVSB8tTFxoXhlrSNxrdIusMVR4_8f8L92aClGIHtYwm60lQx10Mo_Zqlr5WlhpCWf8EHMAUSOcqZ40TmHGUrAhHbl2XsbDHynG_GC3twjCOIjQL7JC7pAdojRCzsIVZY16L4eFZXIxxGNM1oYzHeVTFsIXE0bpBNlugY_FgwsliLFdv_inoWgcCg7Ujvu4Mg83vXCfwBtLCyBmu3_xKCoLcOryErEDMq3jp_Siwtepgs2gHXkJgu-ihE5TfoUodkM4jB44OBivPY0dwS_r","summary":[]}} + + event: response.output_item.done + data: {"type":"response.output_item.done","sequence_number":10,"output_index":2,"item":{"id":"rs_00e767404995b9950068e647fa69e48191b6f5385a856b2948","type":"reasoning","encrypted_content":"gAAAAABo5kf71KqiOXQLdnpn0X2dce5ZXQV3HkgzKX8X-CIy9ax4SrMhk5x7PGHiHFyqGHhXQ9VBYdbmpeQmcXGtngPW7v0BPz1pl4HVfrNGG2MVFgqx3xVgDR8IDDBtoX10qhGFzQOmp_V14WZWPvdwP7irv9LtSXMdDKvJThXM-s4Kp4vdJCPWk6PeLmQ-3bh6eXbxGB9jK5kifSBoJfgicrgpfpUFmdispLXzxdTvyUUMJDjg7p_AgdKwupWPRz0I95d9Fu04GdBZfJn2bcXbUUgKeKKqpOeQjmnxD7Igmce3W0iD5icZq9d7ny_dIfygoel_9JPBNIKJ05TRKVY4C9yiy8VJAvywl55gUdziTWDS5WbEggs99brazkEdWTvqhFcASgqDErzCeppQ0ACWy6a8F2Wgi-g0Iw_MiaB4zvA0VSDC4xOduXhWB4BUJemEp7rE8ztCN7FTKtON98gBixqyV4ueEUDp7SXwkRJ_d-IJMh8w12e3eTTqKs3uljEPDBZXnvPkeCz6GUIuKkEQi5mk4qLi-vZmH3gvhOe-dKJNhhN9CI01PK0bmwNdXr6loXsTmPhsLlp3Mwc9mk1RWvXm0TbDSGDeH1a5UBiqjFI7qjWANOzye5qeFEs-vqIQnC9SBDMMDlFlzLv7LJjsTnz7Q9TTuUx1sdPqSu-lLDM5OBaCdykS4gQzOZqomLJNSb_lMKrzzylpo6bYmV40N_jZkM71gF57n8lBYVmS4t-JidvqsqW3kVlGGDmZ38sA2I7jJmI8v8v3Roio_uNCzzocWCtbcPqgjNsvDzXxTLl_WTjyRfHE9Qdrj-KY565D-ynxlR_iCPFcOx3cwdnprXFf08Jx9WIlvVysra15pjj7WH32t4j_Bp7g3pI8ZVYGwIb4US3Img9D7Plfc3rAWI0d-RGaMFjjbsPzCBZeF9JPDcOwdGLa88ap_vZWkdRScEJffjZocv7FZywA_VgNlGv1S9bP43EuaqEXmF9aNLkMxsoaLLFhRRsgryfxk4jslG38F8BiuHhzxASD8C7f6WNXZVjJL5jW1GkhuYB1qrn9TpXrLR8mJY1Kzkt3dUtNhnIxGKzP","summary":[]}} + + event: response.output_item.added + data: {"type":"response.output_item.added","sequence_number":11,"output_index":3,"item":{"id":"ws_00e767404995b9950068e647fb73c48191b0bdb147c3a0d22c","type":"web_search_call","status":"in_progress"}} + + event: response.web_search_call.in_progress + data: {"type":"response.web_search_call.in_progress","sequence_number":12,"output_index":3,"item_id":"ws_00e767404995b9950068e647fb73c48191b0bdb147c3a0d22c"} + + event: response.web_search_call.searching + data: {"type":"response.web_search_call.searching","sequence_number":13,"output_index":3,"item_id":"ws_00e767404995b9950068e647fb73c48191b0bdb147c3a0d22c"} + + event: response.web_search_call.completed + data: {"type":"response.web_search_call.completed","sequence_number":14,"output_index":3,"item_id":"ws_00e767404995b9950068e647fb73c48191b0bdb147c3a0d22c"} + + event: response.output_item.done + data: {"type":"response.output_item.done","sequence_number":15,"output_index":3,"item":{"id":"ws_00e767404995b9950068e647fb73c48191b0bdb147c3a0d22c","type":"web_search_call","status":"completed","action":{"type":"search","query":"OTEL_INSTRUMENTATION_HTTP_CAPTURE_BODY 
Python"}}} + + event: response.output_item.added + data: {"type":"response.output_item.added","sequence_number":16,"output_index":4,"item":{"id":"rs_00e767404995b9950068e647fd656081919385a27bd1162fcd","type":"reasoning","encrypted_content":"gAAAAABo5kf9aqabCKTlJxF74hcPXQKl5xUBRpxs4IzOqoQ1YUhaTBc7MowYLJQHHXnWYngx3hqtnk1ROPfLNDgQYfngIadOTZxwUoOrcEzIIEnMdC-JsfDfvyqgBrE8qDYNYkAFa-ml-G7Kuy2DRXhqrNljDK-1ILGgvuQALcWh7Vyd0erIWhbJORFeS5-OOgxCVHliruj6JHiGRYeJ-ixdQLrp6yZeRkq1Ow-1FMxblbv0kRuGYDeM5w0K8d8aC0xbFGiJMYCZonGcYG6MqeeF4v2w_9SBOGd-ty6r34TftOOLnWMmYGBKaBM0i1EbTBYrUH0gDwjc5SgiDaMfX_yJhZvtM3yAIk6wrE_Vbmnw86BrV6ZVNNIZjBlX8tQe_-k9qhr-y6Ufpmk7eo1VR2_05ryz3-7zMrzbgkGzj_qWXnVXBRkt4T2cwyHca3FHr1euqpJDOjSDUWIEs7zNZ2WNlCWPIZUrEbmSyq7ChhTU7PECR00wqvzpFSP50jyr0KA0L-hQI5FdAfrEanAOrPkp65aZn6ni9JrQxe8FLRjEszQ4oNvqHdWfHqsFOOhzJ14mLIm9m_2QUDB_crkJkK4o6IXYs7kdGfSzvm0z4HMNSU91PSNdrrKDmu_JTNcV-IFNuP_CBhvyEse4s5Z1IAwYQUxfXJb3mT-Oz3U8j57nNCLb0EP17xMujNuO1NFH5d-NmLWqPPX8YpithcX4jV1O7Yhk6JjsVO4eaFuYz_7BXNzQL5kbAi1spsrYy57uNIO5FG8UZTndcRRPCgyyA1AHCuqo2KTRr875fYgMLJcdAnkszV0YQLWCmcGmR0YbPCNdW8_CGfq-","summary":[]}} + + event: response.output_item.done + data: {"type":"response.output_item.done","sequence_number":17,"output_index":4,"item":{"id":"rs_00e767404995b9950068e647fd656081919385a27bd1162fcd","type":"reasoning","encrypted_content":"gAAAAABo5kf-GWE79PWmX5BUJGtdfBs_8-rH3_mU0ojB4l0DH9RZkOBh8uXVIGaJfTwN9yMTzxrSwC8kdSKuYrKAvpfn82Dq0i4KU0q9ZlA4mQuVsekJXqABXyBxSdR0Zt4k_hDaNRQDiSdl5z9qkDhupx3oD0QzTN2vJF0EntJpnQtiOkBlqdiFJG63olxaKLcaYk7w1UpPXHFKHxv_clxSb6Bhs8XJyquYVcuBP_qxal77tkyepJ4HQI46B_bnwS8LaIGjZ516MYzxNdIYPPc2T1TSACumXTsTLseOL2LseiClD_fSAEuCG_g_lnbtkMa95pz6-fTN2dmJaXcg05MSjX8YasVt1lkk8EzF2L_lhqled2ht2Np70R3Ykmlv8TE0kzCx1otr3WEhdi9xarx7pbISF0HuguVGp3V89-Vikge5LYIar1sMOOOFKxjVBhPvXqBB9sw6JCknfXKorDAj2shu9vGrPHX5YVVBvkJwPuSO1c2oV7SlW96Oy0lt3V9subpr6XqFtxO4QR45Kv_jLCs88_0W5ImjXA4vfKzWwP7vYuzWVY3xkaXXydjH2SzRZ3PpJBFzoNF7QjbnlOBlwKaPnPyGnA3gb-m3dNOYCloaa1Z6XIuNS1zlBHlJIHrJrEKMAtMJxBsmmjbxwu_nMbvdri1MzhtqTwySEB5rLTRYvYuU_3tnDbk1iuoB2kcDp_J6AY5eZScwsiyf-zWhTjgAyjdxwYytFKNzL3j1RTHwefI3U8JCmQfDk7kzfrBK3a_1Jm7MW_LnAhD-Nf9dbeNEBaVI-0LnH3A_xFMYLFOcINGe-5SoUYV88CWMIERuaSuPYoXw54S-fqUmDVVT5eNOFEKXWvuiUZDJBWYW0RAZzKXB2uLowcj1qBOeW2dxUR6s5fFudt45OwA7aDi8_ReiyC_RJ1NuOOYQ5YqxsAYZt6-NXALhZKgOapsK3ui6wCeaXGprG7IohQ1jj7rKUcRhs6JWxPSG8SaG9P8D3p_ahdKnSq6rXB9MpWmbtOSVNOLQk51RnMwwj0__6pHcJz-Tk0ZWr3dHBXbNKwxt8j89XF5bbNChGEZ458LcUE6EQQyjwHbqqxQRe325CHqhwwjgh3eXig_en9hHbDRyyYYFFeG_7ysZW5o6gO8QaI3Lo8JW0_PaY6e1um328lICJauuWTDKIWMFOUEahiz_eveoSHWJR-_mi2KBm4OQZ4y9xeffPmFs","summary":[]}} + + event: response.output_item.added + data: {"type":"response.output_item.added","sequence_number":18,"output_index":5,"item":{"id":"ws_00e767404995b9950068e647fee97c8191919865e0c0a78bba","type":"web_search_call","status":"in_progress"}} + + event: response.web_search_call.in_progress + data: {"type":"response.web_search_call.in_progress","sequence_number":19,"output_index":5,"item_id":"ws_00e767404995b9950068e647fee97c8191919865e0c0a78bba"} + + event: response.web_search_call.searching + data: {"type":"response.web_search_call.searching","sequence_number":20,"output_index":5,"item_id":"ws_00e767404995b9950068e647fee97c8191919865e0c0a78bba"} + + event: response.web_search_call.completed + data: {"type":"response.web_search_call.completed","sequence_number":21,"output_index":5,"item_id":"ws_00e767404995b9950068e647fee97c8191919865e0c0a78bba"} + + event: response.output_item.done + data: 
{"type":"response.output_item.done","sequence_number":22,"output_index":5,"item":{"id":"ws_00e767404995b9950068e647fee97c8191919865e0c0a78bba","type":"web_search_call","status":"completed","action":{"type":"search","query":"OTEL_INSTRUMENTATION_HTTP_CAPTURE_BODY opentelemetry python"}}} + + event: response.output_item.added + data: {"type":"response.output_item.added","sequence_number":23,"output_index":6,"item":{"id":"rs_00e767404995b9950068e648022d288191a6acb6cff99dafba","type":"reasoning","encrypted_content":"gAAAAABo5kgC6pJVTA7kgbUftG9v1gX2Q9F-F39EeJ3t-43LUIAqjCjd7SqHqUWCBKWKJVZbucJs0MhBUziC8dhRNcp7KIsn8PNoO3dTjMcaqgXLmHSl_JWLYUPz9M75OhMpneCI9uMGycJDkgzMEWcwzyXGnPmGIupt_mfHAVNs9khlamVjjFO7e_QUT9s6V_bQD5dzdtwwIjoqZEjvUjqrbO-yR425vIjPTCzdhmHWJRCoVzD-Nih8_ubbkQfh_4i2zZjZhaLEuCqG7k2f_y-XlmSfLakRmEBAgpHXmkiR8gzjY2kYsBwL9jGrC-fFLez0bnz6cV4DPfI9c6obkWlI-t8YXI7DUoqgBbbIzqrviqhWkUjdqoCWFuFwd-3semR5RhLBlmP5tLJTv8Gomtw_aicnY2UpbJvnK6vj4UoOlLotfYueFCyMIgjKN1QbQh4qHwL2iO--eg388K5qQ1lBRB6VozpU12SyQQnqKzU7WDAsxkgdSgvoOY5P7Nfyynozy-Dts55htTNgaHiozBzzAqzGoW8J8t5hewSbbIDObPIst5QcmcnCW_t5Ny-rN7GEQyVn4WYXdMSP86oksp0rzep3DHK2YsZCqkEh3GMzxhHOwaT9CI0jctPBkhf-BcF043tWwO3C8lPy5aoXc3JS9cMLhbzL94MomR9D0dsj42t1OZr4bdjkrqwz3d8aK96lahPeufzKqj3LWQFECR4NVde-qgpNF8hnRoG_GBI61M-7y1nQNkPRpOLkxTnJ8vrb_YLNdXJ7qtMOUbfq_yhJeFvy8CfABuGHz3K_ykKsgKvRNgwNqspoMxAJfbVVdn9oPo8RNxpB","summary":[]}} + + event: response.output_item.done + data: {"type":"response.output_item.done","sequence_number":24,"output_index":6,"item":{"id":"rs_00e767404995b9950068e648022d288191a6acb6cff99dafba","type":"reasoning","encrypted_content":"gAAAAABo5kgDMdyWJm_4EBrnLS26HcJ_EvaKlVyP7dihwR4bkE1kXTcQkdj8PYFl5tw9am6ZKst4kTo_eR_ho9j0wrT8lpwlPyOvuF978cGUykoGJXGMlvBIEt7oBgrQGD5cchOr7uNqTfVo5YLY0ywfmTanxgyR3CD6xZiSrSRuG6e4xGfa_5J2FK6EOqQOvXoXI-KRMwXTlrqh3nMUXnUHCAZAQnplTHHg61Muu-tBIbG8dOXvvIxRJEomIJJdNg_g5pRhCvzqfzA7MR5_oBvQtkn_l3U_mPzvjfIITIJT4iCsnxJbYNE-XP8mokgKq6zZNfL4wi_az44_BshnwJA6fGrAmmMSEMgkFli-XYvTEleOqiHBttqB0ESRoztYo0Pxc_rn5AtsvvPtQUYYapaV4qlL3O6_Q95TsXBlnsmSwjTMSIEmSb-X0BMzMtoYV1srbiQiBq_LXwpI1owWH8rJsw4x9C60dWk7xY19_6bdcYE4QuRPnPmyO91pl33iw5hBOGckfKsMjERAmbsjcHSczOO4xa_30EqrDyx6py50KJQMFzFEdZQXdfM7lRIgbUW4ixWw8YP54vfVgSBNz9HsfRfn8V3jvQ0saIMW-M54aISG6hWs45EbtRv041W_J3SlYcDtOcjsusR5c0vd0Nl4VIECHLQc1ULnqkJZSk334B7oUppZNX7hL_Y6Q3m5mbqgbGsNRo40-W4I0lGTDBnSti4-BBEgXCcjZDQBpguyZR7F-sVUbli8kublzIkxzIYxXLpTFKGMNhBtsfyHEKXZB-I_bCy_rxlszOW3NtodoIdavaVbmbpCwXZAzOE4TJPPlQ82F7rEAX-sCtDzzqlvbs6ZW9AS_3r92Y_kx_apxgcl-uQ16pEXPlwy6IQHFhKQ2m0qj-yDc2C25DyJ8oVBK2vGje0Jn2ppsR1m1knfBP4UTogTMCp5MvWhvgsZgJsRRoL_WxQP9HoYajepLrRXft6yVfQKTh1ksaYIWxZeVfatBoFrUEQ7xVcbPVIduEON8tzPqzRTUaYY6z9HHpYennJmk4Iz3psJRHqa68O4WJN3P91FaUgwx600-LwcOimodB_BUsMBKAMkKMpDroLPoLwzAu3ubU_dUqmn2Dj7HEAjjO2WdN0yAn32AgZP1nqUjxed0Yght2LJWdiiPHAKr5DAd6uq0QTUqSm9dOJI3n-HgXMVNDasRYj-DbwsBN14qUm_zzJg8X28U5vXJZ398OhxRcS5x2uZ51xfPL3wwHy3uGpqUR9ZrMr8nEeC-FDYX--QEXVvCCzJsBdGDbW6ufLmMfbcsv9eAXQJEdCk1xvorwQXONydra0Q66ZC38d1rEdfYEa3FvuqK3o8ug9Jkt04RyMqYFjziIC2D2Jf55jSJhM8fGo=","summary":[]}} + + event: response.output_item.added + data: {"type":"response.output_item.added","sequence_number":25,"output_index":7,"item":{"id":"ws_00e767404995b9950068e64803f27c81918a39ce50cb8dfbc2","type":"web_search_call","status":"in_progress"}} + + event: response.web_search_call.in_progress + data: {"type":"response.web_search_call.in_progress","sequence_number":26,"output_index":7,"item_id":"ws_00e767404995b9950068e64803f27c81918a39ce50cb8dfbc2"} + + event: response.web_search_call.searching + data: 
{"type":"response.web_search_call.searching","sequence_number":27,"output_index":7,"item_id":"ws_00e767404995b9950068e64803f27c81918a39ce50cb8dfbc2"} + + event: response.web_search_call.completed + data: {"type":"response.web_search_call.completed","sequence_number":28,"output_index":7,"item_id":"ws_00e767404995b9950068e64803f27c81918a39ce50cb8dfbc2"} + + event: response.output_item.done + data: {"type":"response.output_item.done","sequence_number":29,"output_index":7,"item":{"id":"ws_00e767404995b9950068e64803f27c81918a39ce50cb8dfbc2","type":"web_search_call","status":"completed","action":{"type":"search","query":"site:github.com open-telemetry/opentelemetry-python-contrib OTEL_INSTRUMENTATION_HTTP_CAPTURE_BODY"}}} + + event: response.output_item.added + data: {"type":"response.output_item.added","sequence_number":30,"output_index":8,"item":{"id":"rs_00e767404995b9950068e648060b088191974c790f06b8ea8e","type":"reasoning","encrypted_content":"gAAAAABo5kgGPV-gvfGXSZGSBsZ1tPi2vexid6rlc1tiCGxsX_wkazKNg6x1Amgex25VFLw9V3MALILyIQY4Eiqlzr0IYWacB4uVN-u-56RdJqRLxRXh4cUkB5aKIYpFoVf1CHt7ONNkClsQy6cp9pDKrqKvNYy6zG86wWoMTxyk09HRTfrvzGZq4ibeQ9bqknDm4zGH50QyqG1SW8rhIiNFATbSSakM-_4RGOGMlUvLye0dxKFRcIVUJnR_cuxmwhjAzWgU4x2AJuHX13MDVtNe_Bv73xhyLxXeYHvVb_atvto5PBkde3gXMMJT0PHv_p1U3wkCgqpZnfFxKsInEzgmgpsPpThyLKffsfYH_gjs1jx7f2Q_Lni0MFBkA7dk2l9l2HFJSsWT4rzlypw8PqUmc6msnZTF9YC2QQhYT8CUElKnOZ-D9ILd3z9mfxzgVP-4f3zGF-nEBfwVXssXtQCx3ynAGKMh0LH5XxACuD2E-h7xgP6fUmBCEtdhHjSaJ9rriiJ0fgkBgbxJkP4tg31ue1TOIWjFfEajZeHmrpqpYetLENGrqEz68TTnEjl09NyQT_jtljWUisra-h8w6E3WXSVUhet9CT-KQq-cKvyIq4w0IvR25DxACLZ-HGVnfQelfA4p45dQl5AJepsT_yRnEIuw_S86ddjnxpOsDXTvnLNcKUq4tdy9pFdLKgfxjUAUymt1Dow2PUl0C6zC9h0rVd5C-3gQr7DbKOQ92fxx6yUvYe5k7Is_hIo0AdSKqzdjLDzc7Hr7pLuy_bPk_BEGbstjPNZgpy5Jo-l5EoG9ASMx5Ykk_mag3rvc6ULU0VjPGPp6rw_V","summary":[]}} + + event: response.output_item.done + data: 
{"type":"response.output_item.done","sequence_number":31,"output_index":8,"item":{"id":"rs_00e767404995b9950068e648060b088191974c790f06b8ea8e","type":"reasoning","encrypted_content":"gAAAAABo5kgKZNRzXIfjM9mJNtrOBB9QgXmxr9lqDw4vJJnfnrxtWXUO-0XAPYIAWwBgPImyrwdNXlHHny-QNhswuEa8gbgpPdhkhREfd-OY9s9gOe05wD2oXgLO_2S9kPLA0wnB0HriVNEYXhsxk93jMwj2XmtS6da0O8lO0KvALs3HT7EtVTNTShKqlumzH2HbA3tQFiln7yxVk8C_FIfDhWpzkLeqDGvIXy2GUyUHhQ3-Qmid9KMkj2jwI35TawAMvxckbMWcroYTRXsuVgvULRoGoU7npo_YFTvjQYIHKKKnMKzM0wBef8hPLgsYOKBVwvFvKyFph9fUV6TGDqzX9daq5Jzl3PgMVKm2o43HjN9NQu8nKf2Dg_VSIbfCmxq1Pde95MV4IuF7UEFCAREF4Nc__K5P4HRvroScjXiWur4FJulj6ngOMDnZDVcP1MchELP3KWe6L4Rz2k6itK_xDugXHnq8Ev3WuZfmibDePxi9KpLnjsvQ9sR2yRstKABto_fhbTuTBuH-CjiFV2nP-HuHlIymzmK_OhwX7v6BHvEfTdiCaDQQUD4a7uo9fwCdHK7mK7CFQynRU42fJzW35phx3QJv6XR4BzxzRUavJy7ZqoptixLGI0A9_8QLvh1BluN0ysh17Q5x49Dr5Nsme9kp2gablAXNbz5PWhrHypVba-GAS9K1AFiOIOLYDrSL5o1-QeaJPT0HK8Bvog0fMBuVP3-4UNaxDHKrTLD1eyTvhZpHCctA93zPlaOh1VOsY9k8C6wAFF2BfvUsdRraQZqN3Ec6QxU3WZAGuxqoWlqgzOzxT7OzzGTRfaa25797koiBwy8J7ov2A0Uu2-GsvJKWwv3Ncc9trFH1QaigqB2lHRi-zWAUAz2Zc5fKB0BXP5NoolEHTk9__VEqOBtXMzEbHVzOtVujoX6Xz2JR3AFs_y27UTYbFxpWBEw9zUHw2K9L5pNpLDJxw5Pe6k6scSEajcRTEPXrg7ztGJlhC4SkTuJT989O6MgZDp0U74FnWs0Q89PCgAIKJWTlzXUpqr7WKva11p_MHlLUJBa_zruktgENfp4h0w7WRNdENl-sUJCqZTfdzdcBH7Nvwsg-qq85gLw73JA5-5p7pYkuD-bfk1rOECiq0pufDxDr55hoZC_QeGuGpjgoJpYeYbkPNQR3lKa8GJBL6liu8pmDNbInBkLPU0OIfhvo56shLSdifP2Qc0UnaNARgXGtVIwmgRHWy_IqJKLFB2YUcawSAv6R19IzlLh6tkJVWOAe4gRckuU8W7eBNeY3fPs6ffRE-A7Bx-RYWtjM7L5OFYuB3y2vPVSGUvAcRxmTfLqOwtZao7ZPcldM7ftkZHMiw4wdFGgqGxUTSRzYS7ESMEGLM3sAp61BLB_X_8qMDceC3Kpo2rHiem_TILbOrW_vDnuvfAxB1oWBrr3IsbHb_BDe2fOEnXT5KlJ6XWc7VhCHKi16RJH_-tpN_YHlncSpjjtkPvu15oVB9gBXJ7kvLATMygy4L3_B-PkjNgVkf1-QmOgJ7GPtxk69jAklSKHvVwPN0s55u5RuvKroyZyoNdIhiSY8EvMjZT9MvmcBgUJNGS7ZbKegVeMR-kcsRxSmJS1_png1lv4bX8x7OVONm7kSC_YBFufov8ul45veYCi6ATuGsbPrC_QKp5mWJfYjnopTwKxs33GvZ_q_AXnmRC3zl2dqcu0cwbJVYniVS1qNIi7G9lxUi9Zzlss5bt3jAvVXbAO2GH1Ij0IrNbGB7SlmJLbnochFU7HweHn9aNWTD5khgwfqwZZNli13SS7r1EmCyi8Xt0KB5MAGOz86o_xgeBkIyaxv4ZSb6wY6g2oVu2XPo1229iGh5-_qHhFvpkDXfzIyrAAbs8ZD5ScXlUUgBzK_JOJImSEP4VSlQdgMBl8KLQgQpZtsPhtdCCT8nmhFRx0IYZxs2yLJMukWyY4iUQ00UsLXtVUyufQ08xjcdrVqevdXvJpVA3GIdcktxb9U5wYdMGOPMUFmPejqLZvC8dQwbWOYdGATfGuPdgv9LS3OTpUXkBnfYtTA3P8zk3RxWFCgDCjekxVgHPJ11BCMZFxw2nHjdqFjyAcISvwf9ix2UXCb9DSdmvbjqUTaJBCQD83JZn0lowtdNwTEWG1nUln8Qe9DIn7Ly8Vj6l0Fq-jkllBa7aWUTBS1H929Yx8gpYRApjBhPCvwVX8o6Lp-7qeb9PN6vmgUDfAG-8T825gkMVwyQVhnQvUlAL3D3Mh4YMdPHS5W83YyhawYvYWRgxcVis2oEMeSvr93CpbvmnbtYPRiousGdxu5Lc0KTNop4y0T5hwHGMpgrP4u9H_JaSBUaf9CO0dO3LyZ_x-rdOM8K70m2vmYDxbEHS7I5EBCN83PJnB8_HTV2KgmUsJcE-5OdQk_AEXikVd6ujMum2gA74MbOJCI89NmC6C5Hv9OS8lzDaaJi3KnhCvqePBweQLQrWTT0mc=","summary":[]}} + + event: response.output_item.added + data: {"type":"response.output_item.added","sequence_number":32,"output_index":9,"item":{"id":"ws_00e767404995b9950068e6480ac0888191a7897231e6ca9911","type":"web_search_call","status":"in_progress"}} + + event: response.web_search_call.in_progress + data: {"type":"response.web_search_call.in_progress","sequence_number":33,"output_index":9,"item_id":"ws_00e767404995b9950068e6480ac0888191a7897231e6ca9911"} + + event: response.web_search_call.searching + data: {"type":"response.web_search_call.searching","sequence_number":34,"output_index":9,"item_id":"ws_00e767404995b9950068e6480ac0888191a7897231e6ca9911"} + + event: response.web_search_call.completed + data: {"type":"response.web_search_call.completed","sequence_number":35,"output_index":9,"item_id":"ws_00e767404995b9950068e6480ac0888191a7897231e6ca9911"} + + event: 
response.output_item.done + data: {"type":"response.output_item.done","sequence_number":36,"output_index":9,"item":{"id":"ws_00e767404995b9950068e6480ac0888191a7897231e6ca9911","type":"web_search_call","status":"completed","action":{"type":"search"}}} + + event: response.output_item.added + data: {"type":"response.output_item.added","sequence_number":37,"output_index":10,"item":{"id":"rs_00e767404995b9950068e6480bbd348191b11aa4762de66297","type":"reasoning","encrypted_content":"gAAAAABo5kgLGc8_lD1si00enbgvnvB46KkRQyUMkOtLBq8HAyHqzCFp0CGUaBOXEidqCKAuoy4CZFGpoUNKF_5M-ovQ6z7ccWfzsOw45z_DYEybftvgIHszxR80FiQr-g58dNDTEp5DwTF5nzS4D0aun7gCTHU9_sp9hMGOq6POVJP7dJIEmoVqZ0Ymnqso-mDtyk0Bl77a5AauYVZ5z1cNmtEha-gqyGCx0LV0Dsb4SuoYK2JhM_AjhYsifQWq73PzbTcAAiERWLqljEnsJGaGBBvr3j8bV_OxnuNa6ql8dd2LKCh-hMfxqBWkLHDHIDICF2sQEvpW9vHkfI3mUnLcSmcVy0ro_KJbI2pbqwPjisSwTsauv17urDFFtx0hGJEwbCVYst2_tUYu-anwkl_f-yBnTSGqL_t5aZnGtK_cWyLI32VDZhTyuHSnjbgDX0-QcaLIFwC7u3pP0XcYOHqILPLtRAPR0HvkbwmMSMxh_o3Digx04okk5PIXfrv50x7Sn8f53vILtMffUh01NLEjtl6DySuxk2d7WptAzZbZ7rws6SggDAQjIBatDe4lDaY68rItn7Rc-gAE-_yqAU6BnNKiGLGrzLnULXIv0wRnOhy-afbAjn9_HAFUQg5Di9urbISnZYbwK0_om52NXOUCzezRGP1VR42_S3aTxHUvt4Q3Ub4tOfniGLeqxzckr-7V8Q0ZggZWe6ca61OTwbPAVShVreMaQZHVEdEdVlD1D6FYrvC1xRXde-U6c-h8AyS-FUTny25IUjB06ChaLBrkOtRW0IFkIqALDxAmwCfEKX2vDofpb9jm2PoDG99Ej2jC0hXlKXjY","summary":[]}} + + event: response.output_item.done + data: {"type":"response.output_item.done","sequence_number":38,"output_index":10,"item":{"id":"rs_00e767404995b9950068e6480bbd348191b11aa4762de66297","type":"reasoning","encrypted_content":"gAAAAABo5kgO1HDzgclsJ0nxbd2rPdcO-gp--TPErZWuWzl9b1tNkttOes7PJp-a5m3GggvshsVGOf4Bm5sgp5_xK6z5zpwoyaQVsac6roXrG4QpXIQrXo-2zZUBwnUjP_yV51JDI2Jgjb1kgcgYz65WB0soyx58CMi67hVyOP6dRvR8wfjoBPEHq_rTf3J85Oux3zIH1rQFviHzwKx01RqPeA22bjpNNoYXkZXMDFvBK6xEMox4MzQjPTjwvymDreDTQImB_qVspiqN5EWcRLWEkTSzM4hu8BgI4tW6zaH530pBL95QqkaWF4VdjQ3c6yEBXXJMXIgmvP0avmgQh6fvMAQ9XVeAWe0tqXMmZ_bU1TCqF4wPYau7hJlLaZqxYzN1JH4RHaE9f6oDAc1zy1n1aUL5qtSiCFD7TR5ADPsdX5D3sa6j43uGGmwrUfHCKnxdZcLY5r1FSHpYDah5pKbxGLWdtG3IxT8-c0RadmtIW8j3nrDy0RaZlLjDXaD-IYxsyRyZLIgZU9IvcT5VKJxGmNpWzt5HDVvv_IUAx2FtbTkPRgmz2rRVR-jx1PoAB5Gf0A_DhTt7zlAjjue5GK2GsLqYwv_BoxmfT2132a6dIo5w9JA15W8j4Zlizm5uHL8iLx8lKLwTcHL5YksNUOVp1ELgan9KSXMj87YJabHDq4iio6w9q0EChqL9p4iP7BQvpheHgdgSW-C6H7N3jQZdSq9TTKqIeYjPnP_ZHXubE3jzhd6KWe9VCENUKPXXEuwUHZKcra7NHnmEDJrZt848sw_T3Lar0mnTuuh6-mLjI7rIuxy1VLmLdNyYuWa-hqbQ1aXiqB3E3VKkwhjMOQG8CWGt2jjnJC9OQz9LYrkn-8n-R3jpOKAwhIla4wDvdh9mUFa_1kiHeNSHZjcXnWXXCKDeHs_dZwHkkQ-FL8VO0JW4jueLPZD4EfBPkcEv68kLtI7EcG2yKZ1d2shdCiFrylbAz3bg3MpeN_B7QtusgCaCKDbI5xmwd5hCR6KiUKjaq4nsOQh2y6gfjI_jdWKM6VD1AihhFtNvHDh6wrsv3q5PD5zaoLvMm9maxSLWwJpMr9dfEQ1X43Vs_Lc_PviJIdIHOuo2om-fwRy1FSknvsDSuktqC5g1oNTmcj1F9Vd2uTG48R2-c3USzK5UvJE3qsuIrzzTiM-mTArvtA0R65wni3SzkuppSvR_U7doHWnfHRl5RTzJqZGzZ478r7gWobbVGMDtNM-D1hU09AIq2dqt-IEteNXzSqluyAvdEQwgTgJrznyN-bGA5lbdUlpG70-t0aBiWplVzLVN6GmFuCO4nlsHvz_PVdx5GiahupscwERmH8rJVGP_UAfjnrcEpMt-oL-D50tk-c8hKR92F022b2QSJswabCLzX3kkduQfhWYuWAj_5FTW_8F-Y5SsawarczeT2ajbctFQyCEffxD2jEJEUOZY2wfp1XCcSgkycp6xo29Vnf-1Sk0rpzC6pQE_i_4jX0yzViQcu-YjTzwcp_rMyhEDzYPOVAlDDwdfQscRr9XoZSZ-CNhHYA7C9dn62JHP5tWxVtLxFuLqK0WNaL7l58qHJlAsb_OoWMzgeH40H9CkJuJqhE2SC3HEJBAZyvElvNCneC88YVTNqC-i0GAGyAN9KhAUkt9WDxtel3Q_2W50FEe1FIqMjrmfWwpG9oA6dMk6nrAmN1HhkOSpmEtTVcp3FcaBGQOkm7L51yG1A8FH","summary":[]}} + + event: response.output_item.added + data: 
{"type":"response.output_item.added","sequence_number":39,"output_index":11,"item":{"id":"ws_00e767404995b9950068e6480e11208191834104e1aaab1148","type":"web_search_call","status":"in_progress"}} + + event: response.web_search_call.in_progress + data: {"type":"response.web_search_call.in_progress","sequence_number":40,"output_index":11,"item_id":"ws_00e767404995b9950068e6480e11208191834104e1aaab1148"} + + event: response.web_search_call.searching + data: {"type":"response.web_search_call.searching","sequence_number":41,"output_index":11,"item_id":"ws_00e767404995b9950068e6480e11208191834104e1aaab1148"} + + event: response.web_search_call.completed + data: {"type":"response.web_search_call.completed","sequence_number":42,"output_index":11,"item_id":"ws_00e767404995b9950068e6480e11208191834104e1aaab1148"} + + event: response.output_item.done + data: {"type":"response.output_item.done","sequence_number":43,"output_index":11,"item":{"id":"ws_00e767404995b9950068e6480e11208191834104e1aaab1148","type":"web_search_call","status":"completed","action":{"type":"search"}}} + + event: response.output_item.added + data: {"type":"response.output_item.added","sequence_number":44,"output_index":12,"item":{"id":"rs_00e767404995b9950068e6480f16f08191beaad2936e3d3195","type":"reasoning","encrypted_content":"gAAAAABo5kgPlunNex6BuTxOUKCV2iqUXdTuaWPf158hICWGET7fM_K5gb29yY45CFmzFgPCtLcyxJeBn7GlAVj9Y_gBSY8iBgfLYkMotCGgNkuVesA7szthHh1rEgQbVEW3kOG-FNNtSNYSlwfhLVbnQtAeZL6JlE6okA2WuNUkTVC-sEDCu7tRvCvUCbd71bi4JonwQ7s2LXBi9gzJrzmuHcIbxq-owQ6ahDW8SA3hK-HSbC2474-ACC4xL_Y5rtwE5qzlSqsVGQ5vCo_A-fA3plz9H5qO_lTu9jll2df6k0iKOp2LY03kFv7vEAGNycoLU0alyHILclGfElMgW0M4a55PE5LsXglRtjyWXu2Vb-Azu2vYASuVXW90MpO-LL3n-UcZT_EPnOQoPiVLbvYaxQpu_7E6RMG9_Rz8WaOa0FRvh4R4X5_2RRGHfH8qndOJx_NKzrEeuYichJ-ipneBw-1PqIi1wp7tK2wzcCW6LQ51g_Olvz7arxcyqug4K0zuQOPJ7Q5msBWTv1lfz0No-D7LWa7ErkSZib8bshxQQrWuqV9ogHZyUXEghY7JKJ5moY-hsGYx-XpOg2KGIgwvl6xK337c9o9fLkymApc7LXArZNIWdVifGYHAZ2akMew3A20Dbtm-0I-67F1vmrfd2mSVzG49ROvf7xRDQA8bUIS6FQe9ekpp6GOJxnZrV9UOaP37H6YlMoO2rg8deFiDRK317kSSlJYiwVuSu9NGWT0qD4hsLGTdIRPQwUDShfjbxL9PVa5bklEXuFzZao5Iv0LJkSgj3jVkKCsDQ8LLoyJA7PiJ8uGCrNC1oIXuNs0RKyYKhky9","summary":[]}} + + event: response.output_item.done + data: 
{"type":"response.output_item.done","sequence_number":45,"output_index":12,"item":{"id":"rs_00e767404995b9950068e6480f16f08191beaad2936e3d3195","type":"reasoning","encrypted_content":"gAAAAABo5kgRqGx3zwhXFb4elaopbSnsU7HznDhJ6BQvZc6F45lekqR0kJF7nDtrPpt9JH9ZwBBl5xHUN2S4dlWBgWiVvQZYUL_3Ytc3AIB7kyDjQLaT9s6EFFsgIE93aeVHE1rZsX3mZm-VUSWLCNz8PxujflN_143h9JLO7AzxB5D0tkd_1bAP9lVEKuAVxdwkGTmGn7oidTHfoAqFgdFHkpJf77ka4FQSrTHVr_CGptlT7N_tJGdUDaTiNgeBak3aadpFxT5dNS21g_DoHzi46ZcguyCt6pgea_fFNwv1QRkE8Jx7qthNfCzMP6g79RAR008SwHrVq6MjQQvTNH8XI6giX75pnyt47fR4oAn3nQJsp60KlCld5vy9V5lQpvrVn4P1BBrXQX0eXqmVIOp2VQYN6dtVjHApFzJBZRb2tqzCiShK939eDoypVtr6oI84oTLlGBtOyeXSsk-kYY_wUwpqPbmCcvj0OfvzXXFzL0knmP5kzKN8KGE1Ko2sOBkRyRJ9dLUEsVQdyhN1QDrtgSNHo7QAKOD2FkcjMwnIXaNya1Pn_yhCDVl-f_jgdPlWvHf4N6nADs-6YM8kewF4VJNIEyVNrOEUkLm9_uIuJngoY9pxvgxEC1zAi2210NPhLbB8rivvOpItlW2KWk436zezajGyS6AdwDNpW88L3QBNNXr2cx_g9KFAmCmCQ97jXDRu9UYyiYR66YblYrxZ3dfc6gy4FCH4yZP-89Kt-o-g1wM3DVKNAF1RhIwHPFP5yv09Zvu3zJYYFvI6k1mUeDBJM_ipR59ja5zLuJZpVRRCPy3GQ5z4ZbjkDjftmN3-A99Bh_6Uhx8MOMspGhdYvK6x__YGp8_UjpqmIMEmqfxsbrNaBaoisMflxzejTo4tlFGQlw9JFC2QHaYN6OG0-ibNF4VR8JSmDlQ8bqUJzXKQvyPKhrcQUqCRbq9N9TWnB7YZcyC18FBdwVrYNTaWMy8AVQUzFEpvErHRFmANwoQcunIYZFVEkBOnO-nf3Qkb6VD2SpnKRf-NGWTai4H3pdbw4-ZlECKi39BWT3w2Dtrp6erWeNyuYcLPasZ8eQoc-2sbn6ahRglb9ElefIrcdw3IIqEF1sE8qsMvoVlRl6trn_kIFZ3e63dSpgmvjWpxYLJwIhtyoOCR6ddGlr7Vz3sEoiVbmp_I7T046EdyIUjBUXfut29WZ5DWpTlaI-q2YsxwLJI5Z6jEAMOue-oJ0LlG_mfGvysspU8LUL6Ls4GOvR5kuk6eyxJ4axuaXICjCgSUJvEvJCz17gMLHKYYeErlYMUgz-GD1yO1pJvsK6k1NX3ggZGWR5Ra2RkoK1h70KidhBAsXiXEFxFNAMi8E0aB5WCydEVlNl79m86CDB7YTE9LyIxrMc9ZyYiSHzLJaLJHDQ10X0KLRxvjM2Upz68u7aJRtRU7JX135cGL5K1MqRFZMA821b-p1mTuez483R3-Q7fAPH2p59s-BUEEIeHJUfc0ZDg7mAEYhNmAtfWbfG5KB6IiCFMRWCm5jeCAUm1KdokUfyEv2I1Qw-JDmmHLdeUCk43If7wgcN33sLiNuS1TMJ3BBBC4qhHrkFYa3IU6ketzYqrqb9SJcWws5xDYxV2oCV5krYoNGg40BdtMfwUAmCI=","summary":[]}} + + event: response.output_item.added + data: {"type":"response.output_item.added","sequence_number":46,"output_index":13,"item":{"id":"ws_00e767404995b9950068e648118bf88191aa7f804637c45b32","type":"web_search_call","status":"in_progress"}} + + event: response.web_search_call.in_progress + data: {"type":"response.web_search_call.in_progress","sequence_number":47,"output_index":13,"item_id":"ws_00e767404995b9950068e648118bf88191aa7f804637c45b32"} + + event: response.web_search_call.searching + data: {"type":"response.web_search_call.searching","sequence_number":48,"output_index":13,"item_id":"ws_00e767404995b9950068e648118bf88191aa7f804637c45b32"} + + event: response.web_search_call.completed + data: {"type":"response.web_search_call.completed","sequence_number":49,"output_index":13,"item_id":"ws_00e767404995b9950068e648118bf88191aa7f804637c45b32"} + + event: response.output_item.done + data: {"type":"response.output_item.done","sequence_number":50,"output_index":13,"item":{"id":"ws_00e767404995b9950068e648118bf88191aa7f804637c45b32","type":"web_search_call","status":"completed","action":{"type":"search","query":"OTEL_PYTHON_LOG_CORRELATION environment variable"}}} + + event: response.output_item.added + data: 
{"type":"response.output_item.added","sequence_number":51,"output_index":14,"item":{"id":"rs_00e767404995b9950068e648130f0481918dc71103fbd6a486","type":"reasoning","encrypted_content":"gAAAAABo5kgTHcu5OJg4QP9h3bQ89VdC1knJ3GGoDCHDtyhVM20pT9s_x2EFIuYBAU3GH2stuBx8KUQO04_n_IjUDz0cWHWM_K-7A8o2neYCppxILORhbj3Zhuo3mudW9QWHSZcPA3ATtcuh4oyo8-Mp8hcSSx-mVM-FEwvCse9WYndrHYccDriMZ7CPchx-p1Xc0McS0eB4cmdWlmZQqLzp1bhbaRw7F2VYyc9Wx6Ymsh3UpgbuK0YUlcIb1teWYL2MPw759NvTiwOJHhOroQ7w_Hvip_O7bHbiPca0FZlacVFqglkIr4ttJAXs5swVUxaPWljLaD3r8eKdm7ixbdfH8mEe5HRZOe8t-H8hlpx8PagZDXVxA_E_TR0mdpfLgjAxAdIwSkduquGeFQaWMtVhufYUfFPfHGWmCu9oUOYlUxk00PJbgRUufa9xhUwNBBJvHD2pGXB3p7OMZ4xDHvmYGpI39vczQRM57LiXOrEVo5NHHNh4vV2AFAeBvq5XjlhjH9rstuZVeK_QNme2qsk8XLftvvhyA_SyW0hKyRfAMIwMVG_B2qpS7zibtNEDVXXIKfsla1SQ0bsyZoqPtMw2ZH6qciO5BABdk9BMckrVLtrLVNTSX8IwkAT72OCfCZ1AvfjjSnqA_TKeKm6WSnhuuyo6ZhacNxgqw1ADE7jEOlUKEH-M8twJJpkIzRqpyRhIfJb5H16kmgjFSdPH_8kKU3tZaYKfrTGkhgYLNHw9g8LWYs-xOVv6qgqAP66xtJJcdiRyi9nwMgOsWorcYxU6g8PCYvc2721q6oGNhVu9C-hHMejlUgC30SYdPoozAcjgoRHbLcNN","summary":[]}} + + event: response.output_item.done + data: {"type":"response.output_item.done","sequence_number":52,"output_index":14,"item":{"id":"rs_00e767404995b9950068e648130f0481918dc71103fbd6a486","type":"reasoning","encrypted_content":"gAAAAABo5kgvRqkGOKegaH9grUqGCPO9vfDWr1XwkE-Wtzt6L0t1rany58X7-oMFUxwaIN89pOPaz2k5N6xvW7MZ7NcqhyTtYC03uG2IUnxEN981cD4nfmZq8YJejJK20r0rEEGNWlEzw8g-CAWaeLaEw8Cx3VeleP1vWY3gBurvGnzuhgYn4iZqHB9ShYSI9-nkJw0JNcnI7JFpprPVZlluDcKkTLcfXYfJBgwCTVDVmkqTqNeIxQD7VE6rtzXpgGKiLDoOTEpX-4NzKlFYLw2XN47LbOqlFfW--nNHtfbWh_W0b3cM-sL_PXVSr1jccFaNMR4V7fioRjcdG155tknwzrcWIWkUKHWy_u7kaCQPbG_RKcRVQ0eTziQOdphQESXlPsVrsSnvUYOhsXOEJsbJueHZEEtZZ9wXRk68fEGw0llPcvSEPBj4ThgOrtXKlmOenvvF-WQJbodsIwbXSt3pj7H6yu83IW0sHzmWeESM0f-6SXwZmLevctlxDHAIQnd3KOqRq6uGaYv4bcFIpXH_Rmflacl3R4hlfxUcMdETlywZaSyQhomLvsqxdS4D6BS6uofysgP0YUJO9fnf-18hbyu8OGq5DeeOmdQ8HgULGeyuRCeFeere7jY6blGWtKOzDAhRaaDQbzGZwF7FMbo6EouIYdyI6gyFctN7YlwE71vthJFdlSzHXVQpibNj5u17XXeHbIeY0SI2xciz3javvMxnsgmpTo0PSkcZMYr5OvcA3IqRZl3Q9XfTdySTeFr--kt5c0BLzrdrVamls2fp31jNKO4yVZczUsm3lEQPB-dy8r1KbaGKiEhc45ZoTF6dMoh4eSkpJpwpOb-Y-VOh4MjVAjwFWL4gHgTaTQYD8trLuBBRvf8uCdbF_CS-fA-sDRe843HKHVZM9l-J4rTOGGZ1dTvqOTGv7KV5XJNoGmdJ4rX6m6TAvtPJzQNFQDQcaCuzfYcakxgIPFaqdEoH_NFIKXGpnZLycxtpNm97Ol1qkzoW_K3VNTeEMiWEgy7jC7ATUtyzpZw3o46KsOog-veyfCV0RjZXiMnZLbWo7Syjfxxed7wn3u2jMCRt_8LsOzG8CuwuaMjCTxooxcd7KNBhYxX2LJhKl-F_cFZhPg5U6ciXORcYN9Eusw3P4KDD9A-s534N-xDHSb-cJmg9dmo50IAh2yPCxTMJMK8qIw73o57tHrSab5KQUk-jE3C1ZKRLNig86S49lXwOtZQmHjVVFCKj9V9o7Hx5nPYEb-eT9YwaZBtOB8t5JOvRThlCrX5ygFix48JTF34JJAcwwxDcgB7k4Xg5aeODslV4sggehZreJ7Etaj2m7ymGARjPvnd0-xpy4TE-heP_CX4hnJwL2JinX3FHJwEpDjahNkO7D3x54UmttS9RBCbPTKL7txwpshCm0yhZ3tBZWTN9OFm1HEJbjt5DSHCBKNjcWKWQOHQbfSis8pHjz2ipw_cp_c6qm-n2GsGvMkad56-2BYVZnkN49t5j_yu7nDV0pOifl6LrclQIxB1mRnpvv2bWyeshzgd_VSxIL9x0oLnZ61XQk9ao3FnTp9cUDZqBb67GiJ65agzMd_7vTDAcLFfhBtZX-_3kJ4BLldTxEuilmCtDCHa1PrLxqYKUH3jqSSNY0ya3cdRDNScv8ew4NzEBbYmzWh13IJdwqlgIceleHxKa70zZsudLmXrcoNxBJnQwSk35jxPuYGV-jD9J4pLR3kg9nvwtiTSEYQ8rQBLGiw6c5u9V4-3vhI91n8K1DFA-jQeusftJWUsdQamfONlnnGbDoMA_Da-9nHpNloTFJ6Effeb6RuIYAP-jP9EgXx-6oT8MNoahO7Y20VXf_laHYdNBDeZuiK2EFFyrFsgLEyLyWKSZy_dpsL0qgO1H2LIQ7jSYlfs0s4JC-mhkcfxRrLVsmmVWD1JWdfPXA4rrx1uq3rtNUeZSQQDn9xhgRsDXIbk2Dn6o7tB9jIk_dz3jSwcw76CRSIOelpyc2gH0TffGZ8ieXHhDNcJcjcYpd44J07eQylwC1LxpKtMHV_vlBEgB0hB_gfoVpYXFtx1vikqq_zS7LOegLrl01nypu37YNMPy7UaGxtb0nl64wZpb1B98DKP3VTJT-O3C8UoWqkAfY5a8Bjs8jdO1V-443_GRjOAQQE-qEdrTAM1bon767biPje56J01n0HgYAOPra1RFvj6FWhR81aAaikjfCGdEl7HIfmEdBiAf80kDlgbluiG4daPGs9M6pHU8JVIWqEUQc09H2QFjYMB6
Rc4zbFsQezt1x5KvGInI8f5xYsKEHMnoPbp0EfR_6NOZZfRYoEByY_Us7qUN1rZLvf9OaUxbFM19KkRog3g8G_DtC859D4Q60WGmpdzptRmNClzk2r5yOgpUiESfuLyJdya4l-qEhLJK2kAOpSxCc_TI61oeO3I7NBvJZLg2FxOpoACHjMYK7XVKKZkQzo5Z_tY6RjIuMg41ihDvoCukiGxEmpEFL0IGsDXInuviVgwvXeYIYh1rrVSdQYhq8jxTleItj-fia0AXGKNsT_8eW50TS-teK66c_yiSf4ghJU86WrU627LmvgjeNM-JzZd-HVkcMSf2Sew65zhDgzUVte8ObpPHo1W_cXO_uQmcKWeQ8lJ7rD9Jt8G5H2VIA1-HCiZfkJN_3HH71cHcH3zuOh1zjdS9Sp1TUvBEoiIoyw9iZ6J6J8bBYham8TWuilgAmOsqZ7M14eiqr0-Wff3sCAEnpJ7BatjZ0CaQr3bUaRR7zxkRZi2052ivjbFNy_dM2RqFpOl5EdopBjVs6V4eETNd1-4jeWoAT8eMdo28O95MXyXnEwHPTYTKafjtvw6HyBGRLUb6tVNU04CWtswWjNYgrz30Ein3ym8GbgNWa6t9U-T6Utiv3KUwhA4lQSzm91XG7WEEhjzPeXL5MfOOkmbrVLecWrNQzaUgPaBZ04HwIXYBOF0wLuJEC2WG6RA2jxa2G5RS_EwX3Pph5_nAWDpHCv-sk81D-k9xYXdobZlD5dCK-4TbXu0IksOmlYkWTqgAj_GMat1PMLtgNsxACR3e1SgUQnOthvz1UZ7z4q46yMM6yJr-NN4Kxud-3jV1wF2GjIaQBveHIuC7i_we4ObNlt9FL68wC6zPgz6N_Ma4f-Rhu-zQpmIoJxoq8h6i7K0AjEIV5KB4LfSeWsIPh-wJtZihoU8H8MDLq2IRFDYskCj2tmQu56ciHphYWxyS3J2PaNJPJwdPYN8g-UAH_DGQjsXjOL57WW66fabkaMRTWaUh8_S8dj_PY_c51hxFYX5pOKLoZPOshEBFwIGfIAINUK41o39ZKY_tzcqfytYtZ8lUjCVXtNHKDWbluKbEiRWoWdp0IFkViRhvE5J2ZUfc3qNUSi5-opQmFf7FUSqSl33dIcc5nOenxX2cv6bKBYl5fZ8olRGr_YFbb9hiq4bN7QQ7BRlhGEfU-Jn5jkY6ousus5n8ZMDiTo121SLtM7FYHAwlDWFqaTPu0WLg60cAFqXru8eRB2n1HA-2WwoTkdz0SPnCjMoAvqZGW2Y1Fxr9tohOZJpOm-barcwKtFvWC1tOkgf2GnxReTIjYZq6cmVdhz55Wvgy5vduwWoFLGMb2EdeHl2C-y6uJI9t57junYhsxVx84mbHCJttZRB7nq1_ZLrnNOUh8Ot97Z5yMVK2Vfn-88HcT86Uo9O2hFgO0DilmqEIf89EXh-a44NGZvXLToRwZDc-JkmeRB5E9fCjF2uuOv9mPyrFtM8LhMteUunUoETNWE4Fq82svMKq9Mg-k-cNuAbzk34hClf4-c_yyHZw0LKuySRM8jYK9k5FPfDaLPTuDfJ96JeJ5GTb35cF83Ee9r0OVe1TvODMk6t1D6kAHmRlLK3MJkZktHq0ETGlKtNmfHIS7vyn8Ok7PafSWyd6c4Dz50vfNH5INXYm6sruqXlRLTyjL9eg6tKxguFBdk4wsKgVBIgzJ5p1Xs3iDNlwdKekxzPzpHp7blZLUfDtsLSFY0Krf0L1pmzWGSGpEzWFupadiYHG2O7MBe02wibtx01Wi-KyHbeiwO64VuHJplTZ_z0KYSzkrqmWlzy9hXxSu3lBcwbFPLgO0-dlhmJbXHORqBj_dBEbL2N-k9KJU6qqHSM7t1Jj4KaH-Y-8wizK5b5t0ZzU3XEtMY_EAwsdnpqJs4pXBEliZ56KuoemB-lAnd853rwIqcQ4A1Flw88uSMvOHTSTZ0ZirD0pCLyzwW13lBUDpgp7zwZHwDR7DNPUtPpHKDWeK6YkLO5EjsPSLfwhcpysCDdHfrMF1FrcwT_WE6Z7ui5o0LEi-335c95Q8qwlGof9P7UxOna7DjTunXijF-aJNPqmhKE4fY92Auvy4y2wvmAVNuLKdPdS9fVnhMZh19Air0ri8WiXyWvqZOPL1MYL3PElKC5uk_INyK6L3S7aMCmws1zFA6PSb2FvogcDSONeNM03Gd68nJlf4daNQXErKkyt5w5ULSHamEqcT3PZDa5en_X_R4lgL_pUFmy6K70Xxw4LiUvH0gO1-v1jP5IH6aNc6N1cXQs6tt1ajViXcD7aCcUovQ2Ejmvq9EUmgVLWkMaXJB06c7ErXME7-9wqas_bj48FBV9mGwDLxif1lsxoe7pqT8xZ9QwGiKoRVb4t7nJUqLWcx3oGDgEetUMyI0VctLixoDdvuO4CIZliRm6ilMomyfNwlHdBMoU7-AhZThBjp8eA9vMz7LWIl-TFuueueodO9FZEdGOs6uBZffBYU39oImI0di6oeT5upaPkVCBEBLmwHmDEsfiaKR8IPuVjNSo-gX8JKu-GHmECGYb_3ffN4C3IG5w3RACsKzQfL2L_tXBF5a1T0NtDtrjeGI-kYFhwd4mM1HQFNKVYx_475tInEGmwbdtzYKi4OrdOnNUG8QyxnRl5tBXWwOqHvorIJvwzQHgm9jnaNMekwUhb_8fiUhm16G1qhBLQZlz7eofhB9WcfuC4dxrdvMhfGEsXH9b_gCfj8vazaJyfmm6PGnyJIK6b82dXtE6JIJfwmX-m1J7AGCaW0So98-XuOUAvDfxDtiOap7JxiIRbrmrY9rLDAMAkE3BmXY9tX-_tMQm8c2pr3ioc94mWntYbIT0QIiGpentwiHJHComvQkb9Ss3DtOuz5vtP6ImJXXOMzb7FVEg6Qc38qfpSw4XtYfrjI5EaCmPl9P0exeeHN_RrI9-VfuM8KdMrjlHd1LbgwkOOKB-O8YpcuRgUTySEDngxQYYA62jmu03Dfc6eyA4WwptQQZZt5RPiXRD-gxOdhjcNGf6WLMYTmI_pBeo56qSbhbt_tllPYbvP1NzZcKb-ssdYZX9m0eEZTqxWeHT4iay6kq3QIo7WUD6PYXin5jRpGfB6FstSoJcUhcJ5B0ld6A3ij6DsdzOy799EVtxrANZ0VEEg86O1x2ft8wl481wDAcDBObPrTS_i7peacVDWiWxYIRWe3NGXCmq6ZU9lBB1iQeOZ16WRVU1g50_AHxiuwPU_x6DmWXTofqr5VNcpMeIMe7JUxE4OF97r9J7fVomM3JoSUirPXc9Fqk0vqj2aZnqOLyq7AAT-4yqb72QpokQhSixAfWUxg167vijcSHoijL_OOxg8LTBrBnmLKTKwsbKtlSJzY9Pj4iws4tQRpH6LkNNkB0EVZBQNuqEzOy1GAeVfC3fttdV6oQLpq7ZGwt777Fw6HORX8dhqpOsaHyQz-VeWxtnZEfWLfWyIy1gouiDmTLdoNKmON8xy_dAWuEerNf0_Hqm8zbqlBGq2xiictUZZTrPtiX-u5y_nRldhSSIz5lDlDaN
cSCNnlElMxjQ-pbFKQg_Zr2jGJeuAY6AuIvW9M7sDROlh8loCGzO9klwYfBjdldE0HsQhyJ8h2sg7qRaxcgGSFdnIVaSnj5tLyVtOiskFQOGj1U7tUD5jE_6nro0Np90_N2X4OquME6lqwofyMOSU3kUo-4CKiq9EF9RksWM9NCpZ59XQcWGTKqujf1cNXlXo4Oz27eRLGzUAhxY8XdTdxEtQ9qiNlAgnWMwFwDuJneZKoKxE3BFf2vXtwUdTMG3mAaLnRfqxRNb8o1QthMVoeLGVDIf6bTGGBVjOHJ5Rsdbq1ZsiQzJcNynvSfAK57BzMIOF53JZ-R8qvrgxapUM7gIqgtdajzTdl9Dj8MWcm1n6DQSWd0au6ZskpDnyfRmF0Vx5qYOwdHcWel0XtDUEh4pBzCxpXnIR2UFF3EbUYfSOZlk2c4RrdGaKApiJhrSveWzbyA5BwbmSj5QRYc8WY1Rjwf8FygQRXQpnFPsgoMuyxcverMLIOGJ2vnAPy3sQ9d8nOYQlOVD9kXv27eRl6ygYXGNm6GRr9lpRU9TFGaoLF4JDFQbQYvQvM_UaGUAM9h_X2Lnnljw4s9AFPV1QR4MgKg8_X6dQt3DKD0Lf76I8jXqdIzIJvBNbY3tAAgvC7uypf5cqUk27ImdMZ2OZoJmz9NcqTiutxBRmJbTIzzWqdwHLKgjeAz3OdTf-eNQO0qNSsMGs8-xC6xm7puBCjKIFkc2xbd3F2HZ4N91sGczTakCJB-5h3G2TcKHFZptvEGfvM06KqRlq6JG_XVgZuG1_O4im0FgDoEgpsR3_wddshPpOavxFpAzOOIY7A344aHAHXyGZtU0SG0Otbq2U2iUTSsmmnvbwTKhnh8OYyytl_zoUPmNSNdkOrQLw0T2K4DNTXyhhdttvff4wSlZ5JNgPWc1vS8Olz_QE1EuZFDrxRFkQo0Tf3mhI1c4LGp4WY-MuMXhS90rQFIsuCUcx_l7KqKHcJMxMFri7M0uY1NwBgvTjlgwsNwZAF1CTUBpnbXMk4dBkxDNM-ha5w3PF3u1JLKNxlGtlqX7QeQQc7zmMym3grPBUM--XV70NvwAKRaGlPSGLnM3ESjO5tXEH1T0ksEP9CSAPfMWk2N4B2Mr6tyyBQTY1GYLDJlam_PbnIVySDjQB9IoPYzfdI4vUWGY-rX8rPF2pMde24r6SqUI9K3hbSgkb9gq1SUNoAsuZRhelLURV_UM3Y777XdSxdCQzoh7sTFu547cwX97Yr3cBZFyzf0BHlt5mNX0WcQK_CHJr_QqYIGrkMilcLulApWXdYTb0P84ntX_LR7rrmFQBfjlVIacsAJ4YvitUS8uRKfVdG1BxlCxaUmVBJjjSYwyPCEUy3g7EyUSn7TSUKj3zBH6o1mZQTXHd36j8AEqeCWNofmrr__j0dMcg7mGlAEE-5h547MDpfNn_bUl6N7S5FzIF_5Y7Cu6c6RtCb63F_XVZm9nMrYOaww-0OAKjPiLiQ59fE3Yb9tACEgwmbcdh_txOmqDu4wIeJmspE6qgxfyadieE9j1gdI0rNWhe7saTKzzo48wGT9ljOrBRUAKmjYR14RwRqfeJqG2hHKAWoZrWAQ2QlxXk3SzVhHXzBTEmCCO_hZFd4YuhSRi65EeAakE_NyDzcsz05Ez4aZSNXonyv7CsNLc-EXrxXgVBReRKxawt8e-NMFy4monSuxBalAr1LArj3f9HHgnb3dERoqG-qfQr6c_0Al4kdm1FFsuX8vtbIPaByUPVI_fQfu3BScJZkWZEMZAoTliy78JhxKPVEQq1JjTVc0rGCcFl5iL-_s6oK9vYbgevfPTl8PFBNUbEr6cfNvikMOeJ564506ZVuzKsvTgXniSQvlk8dgDW29AGSE7dzeLQqq5IdLGqqz_fSc2mQxcAcJgwzRLT1LQr0WkHC17yIbh6MtCfRcLm-vyTuzLiDeTBqoOrH-3WOB3AnHae3nRrlwAR_UFsA1l_fme2vruv2MUY9y8DHX4vwmAhNpj1w29xEuzQpjNEMjKLJiyvf4CwzN7tQU3zUtRnQ8kTyoj2Re5EzETQ8jeF7ZWCZwmMbSQWH4xDyYj5_P96dMaUHGaLBOW-hT5fG56kCyUnV_AcnYAa55FeY-J_j0qHdUV3ZK-bizwcNIlI_kpe0GUGajdNvJZv6r4p9-BxRIDbIHidNjaP8nnyScCYU9uWfJxykcKGgl7jZeWfSAfJASkwsbbfH6Ug7Msv9i2R2HnZNK3WdlAWA4IcpHw19wp79Jq9yE--FTuWaORGD_jcjE1bNPDbQ1AFXc_naxhzV5-XRsFSa_wp4qQznepjGjc8PpczcoCcdfYobR1ZOcrBhhtufEoA_1-ZRjl3f3HPVX3rLFMrIIopS7saLTC_d3e1J3f9CQktA2zGfMKErQmyBPk-UpW1QnTVi3E_qB_IDbQNkV_cOU3doKW9hQXPb9di1An2qhOQSoBaoEnF4xxUrrlfrF7g4WZKM4hcJ-LXNOIk0lhlImnArFnepgH2outlUM1zdn99PXWFbVnnj7VTIvHQ7ZiLV3ej_Tv7oimAme2zB1f83PkrAXPZZ-TzfrFkyFSwOJs32U7VBpKjVhsd3_RxMtrm5QkZiMHKfobGlV5Wd5dmpYgdjSyVv_FkJYVd4itJYgfIh-R9ckUy25Qmy6mlKPCHPB6t1-Ip5e9ihYCJ3jiCCTuw6lGdp2quZc2VWWOm13nPA1Xol0bGIPVMFtuKXYXCRomgSomjqhm-z0NVvQrMsm_YFXl8Yiajq5mXxpDI3rkV5NbHca8mQItDwZ2sfbjtD_UIpk5lGXufs2M5xOzrdk8hSOfy3tLR1q-OkOZaE9U_KWPkcP-FESCoG7sTnVHpT18Ht7s0AneBoo_3q1qEEcX3jmGHwlerOLjpSyZRTaspteUfYXH_0NeiL2HBn3aMW14GP9VsBHAUPlynmFlxiqM8YDpG6bIAt4Yila0cCM9DLn8ja3_-tJt7rIdf7iCJd_753PG24KeKpr13wea0QZ3aBoXwnpqa-C4gbH7hKnExALeEDoUzc8vdENrlaJTYCAI6yM7lg_EZYSRhrTFqDZYFKW53I_i8YU8IfO_7R8s3A2jbTSuhf9WhxD30HvbAlDGk27LSbF0keP88_Vnru_mFuz7z_u59iwhnfDxyEmgNtwPWj7NIOThc5CdEjvTiCwMhHF6u5CxZDN-YNtwQK3ai0dnp2w==","summary":[]}} + + event: response.output_item.added + data: {"type":"response.output_item.added","sequence_number":53,"output_index":15,"item":{"id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","type":"message","status":"in_progress","content":[],"role":"assistant"}} + + event: response.content_part.added + data: 
{"type":"response.content_part.added","sequence_number":54,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"part":{"type":"output_text","annotations":[],"logprobs":[],"text":""}} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":55,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"Short answer:\n- Default","logprobs":[],"obfuscation":"QW656YdVl"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":56,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" FastAPI/OpenTelemetry","logprobs":[],"obfuscation":"3U4PZ1gTRV"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":57,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" instrumentation already records method","logprobs":[],"obfuscation":"q2ksPhSf2"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":58,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"/route/status","logprobs":[],"obfuscation":"sw1"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":59,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":".\n- To also","logprobs":[],"obfuscation":"pc5Ee"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":60,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" include HTTP headers","logprobs":[],"obfuscation":"Ql6sBgDH0a3"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":61,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":", set","logprobs":[],"obfuscation":"e4Pg4aL3Sh5"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":62,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" the capture-","logprobs":[],"obfuscation":"Fno"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":63,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"headers env","logprobs":[],"obfuscation":"EpeQm"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":64,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" vars.\n-","logprobs":[],"obfuscation":"3C1N25eH"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":65,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" To include request","logprobs":[],"obfuscation":"NrPzS6H91D8FW"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":66,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"/response bodies","logprobs":[],"obfuscation":""} + + event: 
response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":67,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":", use the","logprobs":[],"obfuscation":"ZnJ5pMj"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":68,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" FastAPI","logprobs":[],"obfuscation":"7sm32rpp"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":69,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"/ASGI","logprobs":[],"obfuscation":"2gFmImEqUg8"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":70,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" request/response","logprobs":[],"obfuscation":"KwiDLsxL9vtk6Aa"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":71,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" hooks and add","logprobs":[],"obfuscation":"m5"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":72,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" the","logprobs":[],"obfuscation":"02ehOPl5ojtF"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":73,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" payload to","logprobs":[],"obfuscation":"oo1Q1"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":74,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" the span yourself","logprobs":[],"obfuscation":"lr2fU5ljGqaz7s"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":75,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" (with red","logprobs":[],"obfuscation":"jfWSWe"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":76,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"action/size","logprobs":[],"obfuscation":"PYKcu"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":77,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" limits).\n\nHow","logprobs":[],"obfuscation":"Fz"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":78,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" to do it","logprobs":[],"obfuscation":"zOdlIvE"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":79,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"\n\n1)","logprobs":[],"obfuscation":"w0105lPiPO2c"} + + event: response.output_text.delta + data: 
{"type":"response.output_text.delta","sequence_number":80,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" Enable header capture","logprobs":[],"obfuscation":"3zy8uPPFOT"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":81,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" (server side","logprobs":[],"obfuscation":"zQQ"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":82,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":")\n- Choose","logprobs":[],"obfuscation":"7IXrNN"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":83,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" just the","logprobs":[],"obfuscation":"1pPuoih"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":84,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" headers you need; avoid","logprobs":[],"obfuscation":"MQJJ4QNI"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":85,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" sensitive ones or sanitize","logprobs":[],"obfuscation":"uPTnW"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":86,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" them.\n\nexport OTEL","logprobs":[],"obfuscation":"MDKjWaiX27Zhd"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":87,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"_INSTRUMENTATION_HTTP_CAPTURE","logprobs":[],"obfuscation":"Tys"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":88,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"_HEADERS_SERVER_REQUEST=\"content","logprobs":[],"obfuscation":""} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":89,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"-type,user","logprobs":[],"obfuscation":"1GqmGk"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":90,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"-agent\"\n","logprobs":[],"obfuscation":"vAP56nOS"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":91,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"export OTEL_INSTRUMENTATION","logprobs":[],"obfuscation":"xZc1w"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":92,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"_HTTP_CAPTURE_HEADERS","logprobs":[],"obfuscation":"4gkvCmoHp5U"} + + 
event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":93,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"_SERVER_RESPONSE=\"content-type\"\n","logprobs":[],"obfuscation":""} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":94,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"export OTEL_INSTRUMENTATION_HTTP","logprobs":[],"obfuscation":""} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":95,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"_CAPTURE_HEADERS_SANITIZE_FIELDS=\"authorization,set-cookie\"\n\nThis makes headers appear on spans as http.request.header.* and http.response.header.*. ([opentelemetry-python-contrib.readthedocs.io](https://opentelemetry-python-contrib.readthedocs.io/en/latest/instrumentation/fastapi/fastapi.html))\n\n2)","logprobs":[],"obfuscation":"TvIZ"} + + event: response.output_text.annotation.added + data: {"type":"response.output_text.annotation.added","sequence_number":96,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"annotation_index":0,"annotation":{"type":"url_citation","end_index":946,"start_index":799,"title":"OpenTelemetry FastAPI Instrumentation — OpenTelemetry Python Contrib documentation","url":"https://opentelemetry-python-contrib.readthedocs.io/en/latest/instrumentation/fastapi/fastapi.html"}} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":97,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" Add hooks to capture request","logprobs":[],"obfuscation":"EEa"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":98,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"/response bodies","logprobs":[],"obfuscation":""} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":99,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"\nNote:","logprobs":[],"obfuscation":"XaIX2CSRdY"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":100,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" There isn’t","logprobs":[],"obfuscation":"rsOM"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":101,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" a built-in Python","logprobs":[],"obfuscation":"J7hU40MnWaDWKm"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":102,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" env","logprobs":[],"obfuscation":"NGoH9cjie1b0"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":103,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" var to","logprobs":[],"obfuscation":"6lVnhLDY0"} + + event: 
response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":104,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" auto-capture","logprobs":[],"obfuscation":"iSF"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":105,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" HTTP bodies for Fast","logprobs":[],"obfuscation":"s3ZURVDE7Pe"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":106,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"API/AS","logprobs":[],"obfuscation":"LxfNueuvmk"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":107,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"GI. Use","logprobs":[],"obfuscation":"yOzBvJyAa"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":108,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" hooks to look at","logprobs":[],"obfuscation":"GQdMrMH4MLkmK4j"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":109,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" ASGI receive","logprobs":[],"obfuscation":"g5q"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":110,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"/send events and","logprobs":[],"obfuscation":""} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":111,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" attach (tr","logprobs":[],"obfuscation":"cZoHW"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":112,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"uncated) bodies","logprobs":[],"obfuscation":"9"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":113,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" as span attributes","logprobs":[],"obfuscation":"Y238731jczz4l"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":114,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":".\n\nfrom","logprobs":[],"obfuscation":"E8EBQ9xD0"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":115,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" fastapi import","logprobs":[],"obfuscation":"b"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":116,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" FastAPI","logprobs":[],"obfuscation":"VH1Hv2e4"} + + event: response.output_text.delta + 
data: {"type":"response.output_text.delta","sequence_number":117,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"\nfrom opente","logprobs":[],"obfuscation":"TbsE"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":118,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"lemetry.trace","logprobs":[],"obfuscation":"dXL"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":119,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" import Span","logprobs":[],"obfuscation":"unv2"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":120,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"\nfrom opente","logprobs":[],"obfuscation":"7TVO"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":121,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"lemetry.instrument","logprobs":[],"obfuscation":"jMHMj7SWlgIy4A"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":122,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"ation.fastapi import","logprobs":[],"obfuscation":"9q4BDBkbIraH"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":123,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" FastAPIInstrument","logprobs":[],"obfuscation":"ZfamyqTfcvhQ8g"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":124,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"or\n\nMAX","logprobs":[],"obfuscation":"NTlVsxprA"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":125,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"_BYTES = ","logprobs":[],"obfuscation":"sd1zZTo"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":126,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"2048 ","logprobs":[],"obfuscation":"Zsda1j71SNR"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":127,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" # keep this","logprobs":[],"obfuscation":"j5DU"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":128,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" small in prod","logprobs":[],"obfuscation":"rK"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":129,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"\n\ndef client","logprobs":[],"obfuscation":"nvHY"} + + event: response.output_text.delta + data: 
{"type":"response.output_text.delta","sequence_number":130,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"_request_hook(span","logprobs":[],"obfuscation":"USmXfXueAuq3wC"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":131,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":": Span,","logprobs":[],"obfuscation":"Ws1OsCUbr"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":132,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" scope: dict","logprobs":[],"obfuscation":"4iFz"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":133,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":", message:","logprobs":[],"obfuscation":"MOHXRd"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":134,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" dict):\n ","logprobs":[],"obfuscation":"FYlN9"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":135,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" if span and","logprobs":[],"obfuscation":"zzyk"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":136,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" span.is_record","logprobs":[],"obfuscation":"z"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":137,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"ing() and","logprobs":[],"obfuscation":"fw87omu"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":138,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" message.get(\"","logprobs":[],"obfuscation":"Yr"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":139,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"type\") ==","logprobs":[],"obfuscation":"fqUD5uU"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":140,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" \"http.request","logprobs":[],"obfuscation":"Ka"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":141,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"\":\n body","logprobs":[],"obfuscation":"R"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":142,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" = message.get","logprobs":[],"obfuscation":"6U"} + + event: response.output_text.delta + data: 
{"type":"response.output_text.delta","sequence_number":143,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"(\"body\")","logprobs":[],"obfuscation":"L3lZVjRl"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":144,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" or b\"","logprobs":[],"obfuscation":"Fb6XQEIoBL"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":145,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"\"\n if","logprobs":[],"obfuscation":"UQY0"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":146,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" body:\n ","logprobs":[],"obfuscation":"jXQYhCBIvivXgj"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":147,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" span.set_attribute","logprobs":[],"obfuscation":"2oyOpzLJPthfZ"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":148,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"(\n \"","logprobs":[],"obfuscation":"lucm0soBA3V0L"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":149,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"http.request.body","logprobs":[],"obfuscation":"S9olTEyX8wJDM5m"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":150,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"\",\n body","logprobs":[],"obfuscation":"OD1u3emOO"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":151,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"[:MAX_BYTES","logprobs":[],"obfuscation":"WKrMe"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":152,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"].decode(\"","logprobs":[],"obfuscation":"uqZx7I"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":153,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"utf-8","logprobs":[],"obfuscation":"XSqoyd89uue"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":154,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"\", \"replace","logprobs":[],"obfuscation":"l7Pww"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":155,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"\"),\n )\n","logprobs":[],"obfuscation":"Ak1JmuUaWvBkZ5"} + + event: response.output_text.delta + data: 
{"type":"response.output_text.delta","sequence_number":156,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"\ndef client_response","logprobs":[],"obfuscation":"agItEfDavuTZ"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":157,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"_hook(span:","logprobs":[],"obfuscation":"0VAuw"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":158,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" Span, scope","logprobs":[],"obfuscation":"YghN"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":159,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":": dict,","logprobs":[],"obfuscation":"YibR257Bu"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":160,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" message: dict","logprobs":[],"obfuscation":"38"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":161,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"):\n if","logprobs":[],"obfuscation":"JUf91vW"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":162,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" span and span","logprobs":[],"obfuscation":"Sb"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":163,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":".is_recording","logprobs":[],"obfuscation":"Hkb"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":164,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"() and message","logprobs":[],"obfuscation":"z1"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":165,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":".get(\"type","logprobs":[],"obfuscation":"yKuYux"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":166,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"\") == \"","logprobs":[],"obfuscation":"1c6fxZTMb"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":167,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"http.response.body","logprobs":[],"obfuscation":"9aZ2PGrpKvVn13"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":168,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"\":\n body","logprobs":[],"obfuscation":"b"} + + event: response.output_text.delta + data: 
{"type":"response.output_text.delta","sequence_number":169,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" = message.get","logprobs":[],"obfuscation":"x2"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":170,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"(\"body\")","logprobs":[],"obfuscation":"Dp8N8Iah"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":171,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" or b\"","logprobs":[],"obfuscation":"Qx2tMBpbMG"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":172,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"\"\n if","logprobs":[],"obfuscation":"pbgt"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":173,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" body:\n ","logprobs":[],"obfuscation":"ODgYUulNj6ENmJ"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":174,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" span.set_attribute","logprobs":[],"obfuscation":"AApyjf2xz4kJu"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":175,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"(\n \"","logprobs":[],"obfuscation":"gOJF0rdcPoyhO"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":176,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"http.response.body","logprobs":[],"obfuscation":"jumSpqpiJy5SRB"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":177,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"\",\n body","logprobs":[],"obfuscation":"sqLISm0AU"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":178,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"[:MAX_BYTES","logprobs":[],"obfuscation":"hrYZz"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":179,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"].decode(\"","logprobs":[],"obfuscation":"wysmH9"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":180,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"utf-8","logprobs":[],"obfuscation":"IhrVs5vsvpY"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":181,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"\", \"replace","logprobs":[],"obfuscation":"PEUqY"} + + event: response.output_text.delta + data: 
{"type":"response.output_text.delta","sequence_number":182,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"\"),\n )\n","logprobs":[],"obfuscation":"ZueyBhDDVziCUz"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":183,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"\napp = Fast","logprobs":[],"obfuscation":"R9HPs"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":184,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"API()\nFast","logprobs":[],"obfuscation":"kcdIII"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":185,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"APIInstrumentor","logprobs":[],"obfuscation":"U"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":186,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":".instrument_app(","logprobs":[],"obfuscation":""} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":187,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"\n app,","logprobs":[],"obfuscation":"tzAvfXN"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":188,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"\n client_request","logprobs":[],"obfuscation":"0jlVOgOrTXf95"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":189,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"_hook=client","logprobs":[],"obfuscation":"42nw"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":190,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"_request_hook,\n ","logprobs":[],"obfuscation":"XdokTWr5sI53Rq"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":191,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" client_response_hook","logprobs":[],"obfuscation":"Ryt6PC32eRZ"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":192,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"=client_response","logprobs":[],"obfuscation":""} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":193,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"_hook,\n)\n","logprobs":[],"obfuscation":"gUGvAuV"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":194,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"\n- The hooks","logprobs":[],"obfuscation":"mHgR"} + + event: response.output_text.delta + data: 
{"type":"response.output_text.delta","sequence_number":195,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" receive the AS","logprobs":[],"obfuscation":"2"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":196,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"GI event dict","logprobs":[],"obfuscation":"Zhr"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":197,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"s: http","logprobs":[],"obfuscation":"jfNWuMPm7"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":198,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":".request (with","logprobs":[],"obfuscation":"Yh"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":199,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" body/more","logprobs":[],"obfuscation":"YLpSO3"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":200,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"_body) and","logprobs":[],"obfuscation":"Y4el37"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":201,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" http.response.body","logprobs":[],"obfuscation":"tGVrDMzxQGABo"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":202,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":". 
If your","logprobs":[],"obfuscation":"H4TMy4C"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":203,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" bodies can be","logprobs":[],"obfuscation":"uN"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":204,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" chunked,","logprobs":[],"obfuscation":"5k253hM"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":205,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" you may need","logprobs":[],"obfuscation":"fq0"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":206,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" to accumulate across","logprobs":[],"obfuscation":"i5xiKyaUonq"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":207,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" calls when message","logprobs":[],"obfuscation":"fQC4x9Vw6BuEC"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":208,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":".get(\"more","logprobs":[],"obfuscation":"PbbrSS"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":209,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"_body\") is","logprobs":[],"obfuscation":"GMX10c"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":210,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" True. 
","logprobs":[],"obfuscation":"o74PybOdt"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":211,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"([opentelemetry-python-contrib.readthedocs.io](https://opentelemetry-python-contrib.readthedocs.io/en/latest/instrumentation/fastapi/fastapi.html)","logprobs":[],"obfuscation":"tEbKNEDIrbvhWx"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":212,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":")","logprobs":[],"obfuscation":"Lz0hgKWzSnbRROi"} + + event: response.output_text.annotation.added + data: {"type":"response.output_text.annotation.added","sequence_number":213,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"annotation_index":1,"annotation":{"type":"url_citation","end_index":2582,"start_index":2435,"title":"OpenTelemetry FastAPI Instrumentation — OpenTelemetry Python Contrib documentation","url":"https://opentelemetry-python-contrib.readthedocs.io/en/latest/instrumentation/fastapi/fastapi.html"}} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":214,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"\n\n3)","logprobs":[],"obfuscation":"aBwAhxAEaX9w"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":215,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" Be careful with","logprobs":[],"obfuscation":""} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":216,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" PII and","logprobs":[],"obfuscation":"Rwe2UmHi"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":217,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" size\n-","logprobs":[],"obfuscation":"7DffUf27X"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":218,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" Always limit size","logprobs":[],"obfuscation":"pR7FBMSjv390qu"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":219,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" and consider redaction","logprobs":[],"obfuscation":"Vf9TN26VP"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":220,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" before putting payloads","logprobs":[],"obfuscation":"h51TunAK"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":221,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" on spans.\n-","logprobs":[],"obfuscation":"JDdO"} + + event: response.output_text.delta + data: 
{"type":"response.output_text.delta","sequence_number":222,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" Use the sanitize","logprobs":[],"obfuscation":"91PRbxVnGZLvgGc"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":223,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" env var above","logprobs":[],"obfuscation":"s4"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":224,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" for sensitive headers","logprobs":[],"obfuscation":"Sy0mQJNYtV"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":225,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":". ","logprobs":[],"obfuscation":"Tv2saDsNvGP0Ss"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":226,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"([opentelemetry-python-contrib.readthedocs.io](https://opentelemetry-python-contrib.readthedocs.io/en/latest/instrumentation/fastapi/fastapi.html))\n","logprobs":[],"obfuscation":"60BO8HvmqJxN"} + + event: response.output_text.annotation.added + data: {"type":"response.output_text.annotation.added","sequence_number":227,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"annotation_index":2,"annotation":{"type":"url_citation","end_index":2896,"start_index":2749,"title":"OpenTelemetry FastAPI Instrumentation — OpenTelemetry Python Contrib documentation","url":"https://opentelemetry-python-contrib.readthedocs.io/en/latest/instrumentation/fastapi/fastapi.html"}} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":228,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"\nOptional: correlate logs","logprobs":[],"obfuscation":"7YeMGHx"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":229,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" with traces\n-","logprobs":[],"obfuscation":"EZ"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":230,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" If you also want","logprobs":[],"obfuscation":"k2hW00zK4y3eqbT"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":231,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" request/response","logprobs":[],"obfuscation":"7NpO5LJj9Gy2m67"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":232,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" details in logs with","logprobs":[],"obfuscation":"gs8UaJ5wQd1"} + + event: response.output_text.delta + data: 
{"type":"response.output_text.delta","sequence_number":233,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" trace IDs, enable","logprobs":[],"obfuscation":"Z6a5MVGPKdbA1u"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":234,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" Python log correlation:\n","logprobs":[],"obfuscation":"JIOMeuA"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":235,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"\nexport OTEL_P","logprobs":[],"obfuscation":"e9"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":236,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"YTHON_LOG_COR","logprobs":[],"obfuscation":"OOv"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":237,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"RELATION=true","logprobs":[],"obfuscation":"DbI"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":238,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"\n\nor programmatically","logprobs":[],"obfuscation":"qnwdSrvm5by"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":239,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":":\nfrom opente","logprobs":[],"obfuscation":"Esl"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":240,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"lemetry.instrumentation","logprobs":[],"obfuscation":"23c4XsjdJ"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":241,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":".logging import LoggingInstrument","logprobs":[],"obfuscation":"o7DV6JLpDWxpucv"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":242,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"or\nLoggingInstrument","logprobs":[],"obfuscation":"TTb6bDzWYMof"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":243,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"or().instrument(set","logprobs":[],"obfuscation":"gUwt2OhTADTIR"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":244,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"_logging_format=True)\n","logprobs":[],"obfuscation":"ELUu76gEV6"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":245,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"\nThis injects 
trace","logprobs":[],"obfuscation":"VVQ6lbmXDfXy5"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":246,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"_id/span_id into","logprobs":[],"obfuscation":""} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":247,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" log records so you","logprobs":[],"obfuscation":"73QqvEOleOa3u"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":248,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" can line up logs","logprobs":[],"obfuscation":"47BlS9DSo91ld15"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":249,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" with the span that","logprobs":[],"obfuscation":"IiSRw4HuErGhm"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":250,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" carries the HTTP payload","logprobs":[],"obfuscation":"NIq2zfj"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":251,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" attributes. ","logprobs":[],"obfuscation":"idD"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":252,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"([opentelemetry-python-contrib.readthedocs.io](https://opentelemetry-python-contrib.readthedocs.io/en/latest/instrumentation/logging/logging.html?utm_source=openai))\n","logprobs":[],"obfuscation":"VJ71qk85d5"} + + event: response.output_text.annotation.added + data: {"type":"response.output_text.annotation.added","sequence_number":253,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"annotation_index":3,"annotation":{"type":"url_citation","end_index":3518,"start_index":3353,"title":"OpenTelemetry Logging Instrumentation — OpenTelemetry Python Contrib documentation","url":"https://opentelemetry-python-contrib.readthedocs.io/en/latest/instrumentation/logging/logging.html?utm_source=openai"}} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":254,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"\nWant me to tailor","logprobs":[],"obfuscation":"GIvIt9hZF2F2nG"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":255,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" the hook to only","logprobs":[],"obfuscation":"N1ua4nlopnoaPkO"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":256,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" capture JSON bodies,","logprobs":[],"obfuscation":"Vw7qJi4GhU3"} + + event: 
response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":257,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" skip binary content,","logprobs":[],"obfuscation":"2D08dmEQMoV"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":258,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" or accumulate chunked","logprobs":[],"obfuscation":"4qW8RFDKgK"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":259,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" bodies safely?","logprobs":[],"obfuscation":"3"} + + event: response.output_text.done + data: {"type":"response.output_text.done","sequence_number":260,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"text":"Short answer:\n- Default FastAPI/OpenTelemetry instrumentation already records method/route/status.\n- To also include HTTP headers, set the capture-headers env vars.\n- To include request/response bodies, use the FastAPI/ASGI request/response hooks and add the payload to the span yourself (with redaction/size limits).\n\nHow to do it\n\n1) Enable header capture (server side)\n- Choose just the headers you need; avoid sensitive ones or sanitize them.\n\nexport OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SERVER_REQUEST=\"content-type,user-agent\"\nexport OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SERVER_RESPONSE=\"content-type\"\nexport OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SANITIZE_FIELDS=\"authorization,set-cookie\"\n\nThis makes headers appear on spans as http.request.header.* and http.response.header.*. ([opentelemetry-python-contrib.readthedocs.io](https://opentelemetry-python-contrib.readthedocs.io/en/latest/instrumentation/fastapi/fastapi.html))\n\n2) Add hooks to capture request/response bodies\nNote: There isn’t a built-in Python env var to auto-capture HTTP bodies for FastAPI/ASGI. Use hooks to look at ASGI receive/send events and attach (truncated) bodies as span attributes.\n\nfrom fastapi import FastAPI\nfrom opentelemetry.trace import Span\nfrom opentelemetry.instrumentation.fastapi import FastAPIInstrumentor\n\nMAX_BYTES = 2048 # keep this small in prod\n\ndef client_request_hook(span: Span, scope: dict, message: dict):\n if span and span.is_recording() and message.get(\"type\") == \"http.request\":\n body = message.get(\"body\") or b\"\"\n if body:\n span.set_attribute(\n \"http.request.body\",\n body[:MAX_BYTES].decode(\"utf-8\", \"replace\"),\n )\n\ndef client_response_hook(span: Span, scope: dict, message: dict):\n if span and span.is_recording() and message.get(\"type\") == \"http.response.body\":\n body = message.get(\"body\") or b\"\"\n if body:\n span.set_attribute(\n \"http.response.body\",\n body[:MAX_BYTES].decode(\"utf-8\", \"replace\"),\n )\n\napp = FastAPI()\nFastAPIInstrumentor.instrument_app(\n app,\n client_request_hook=client_request_hook,\n client_response_hook=client_response_hook,\n)\n\n- The hooks receive the ASGI event dicts: http.request (with body/more_body) and http.response.body. If your bodies can be chunked, you may need to accumulate across calls when message.get(\"more_body\") is True. 
([opentelemetry-python-contrib.readthedocs.io](https://opentelemetry-python-contrib.readthedocs.io/en/latest/instrumentation/fastapi/fastapi.html))\n\n3) Be careful with PII and size\n- Always limit size and consider redaction before putting payloads on spans.\n- Use the sanitize env var above for sensitive headers. ([opentelemetry-python-contrib.readthedocs.io](https://opentelemetry-python-contrib.readthedocs.io/en/latest/instrumentation/fastapi/fastapi.html))\n\nOptional: correlate logs with traces\n- If you also want request/response details in logs with trace IDs, enable Python log correlation:\n\nexport OTEL_PYTHON_LOG_CORRELATION=true\n\nor programmatically:\nfrom opentelemetry.instrumentation.logging import LoggingInstrumentor\nLoggingInstrumentor().instrument(set_logging_format=True)\n\nThis injects trace_id/span_id into log records so you can line up logs with the span that carries the HTTP payload attributes. ([opentelemetry-python-contrib.readthedocs.io](https://opentelemetry-python-contrib.readthedocs.io/en/latest/instrumentation/logging/logging.html?utm_source=openai))\n\nWant me to tailor the hook to only capture JSON bodies, skip binary content, or accumulate chunked bodies safely?","logprobs":[]} + + event: response.content_part.done + data: {"type":"response.content_part.done","sequence_number":261,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"part":{"type":"output_text","annotations":[{"type":"url_citation","end_index":946,"start_index":799,"title":"OpenTelemetry FastAPI Instrumentation — OpenTelemetry Python Contrib documentation","url":"https://opentelemetry-python-contrib.readthedocs.io/en/latest/instrumentation/fastapi/fastapi.html"},{"type":"url_citation","end_index":2582,"start_index":2435,"title":"OpenTelemetry FastAPI Instrumentation — OpenTelemetry Python Contrib documentation","url":"https://opentelemetry-python-contrib.readthedocs.io/en/latest/instrumentation/fastapi/fastapi.html"},{"type":"url_citation","end_index":2896,"start_index":2749,"title":"OpenTelemetry FastAPI Instrumentation — OpenTelemetry Python Contrib documentation","url":"https://opentelemetry-python-contrib.readthedocs.io/en/latest/instrumentation/fastapi/fastapi.html"},{"type":"url_citation","end_index":3518,"start_index":3353,"title":"OpenTelemetry Logging Instrumentation — OpenTelemetry Python Contrib documentation","url":"https://opentelemetry-python-contrib.readthedocs.io/en/latest/instrumentation/logging/logging.html?utm_source=openai"}],"logprobs":[],"text":"Short answer:\n- Default FastAPI/OpenTelemetry instrumentation already records method/route/status.\n- To also include HTTP headers, set the capture-headers env vars.\n- To include request/response bodies, use the FastAPI/ASGI request/response hooks and add the payload to the span yourself (with redaction/size limits).\n\nHow to do it\n\n1) Enable header capture (server side)\n- Choose just the headers you need; avoid sensitive ones or sanitize them.\n\nexport OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SERVER_REQUEST=\"content-type,user-agent\"\nexport OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SERVER_RESPONSE=\"content-type\"\nexport OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SANITIZE_FIELDS=\"authorization,set-cookie\"\n\nThis makes headers appear on spans as http.request.header.* and http.response.header.*. 
([opentelemetry-python-contrib.readthedocs.io](https://opentelemetry-python-contrib.readthedocs.io/en/latest/instrumentation/fastapi/fastapi.html))\n\n2) Add hooks to capture request/response bodies\nNote: There isn’t a built-in Python env var to auto-capture HTTP bodies for FastAPI/ASGI. Use hooks to look at ASGI receive/send events and attach (truncated) bodies as span attributes.\n\nfrom fastapi import FastAPI\nfrom opentelemetry.trace import Span\nfrom opentelemetry.instrumentation.fastapi import FastAPIInstrumentor\n\nMAX_BYTES = 2048 # keep this small in prod\n\ndef client_request_hook(span: Span, scope: dict, message: dict):\n if span and span.is_recording() and message.get(\"type\") == \"http.request\":\n body = message.get(\"body\") or b\"\"\n if body:\n span.set_attribute(\n \"http.request.body\",\n body[:MAX_BYTES].decode(\"utf-8\", \"replace\"),\n )\n\ndef client_response_hook(span: Span, scope: dict, message: dict):\n if span and span.is_recording() and message.get(\"type\") == \"http.response.body\":\n body = message.get(\"body\") or b\"\"\n if body:\n span.set_attribute(\n \"http.response.body\",\n body[:MAX_BYTES].decode(\"utf-8\", \"replace\"),\n )\n\napp = FastAPI()\nFastAPIInstrumentor.instrument_app(\n app,\n client_request_hook=client_request_hook,\n client_response_hook=client_response_hook,\n)\n\n- The hooks receive the ASGI event dicts: http.request (with body/more_body) and http.response.body. If your bodies can be chunked, you may need to accumulate across calls when message.get(\"more_body\") is True. ([opentelemetry-python-contrib.readthedocs.io](https://opentelemetry-python-contrib.readthedocs.io/en/latest/instrumentation/fastapi/fastapi.html))\n\n3) Be careful with PII and size\n- Always limit size and consider redaction before putting payloads on spans.\n- Use the sanitize env var above for sensitive headers. ([opentelemetry-python-contrib.readthedocs.io](https://opentelemetry-python-contrib.readthedocs.io/en/latest/instrumentation/fastapi/fastapi.html))\n\nOptional: correlate logs with traces\n- If you also want request/response details in logs with trace IDs, enable Python log correlation:\n\nexport OTEL_PYTHON_LOG_CORRELATION=true\n\nor programmatically:\nfrom opentelemetry.instrumentation.logging import LoggingInstrumentor\nLoggingInstrumentor().instrument(set_logging_format=True)\n\nThis injects trace_id/span_id into log records so you can line up logs with the span that carries the HTTP payload attributes. 
([opentelemetry-python-contrib.readthedocs.io](https://opentelemetry-python-contrib.readthedocs.io/en/latest/instrumentation/logging/logging.html?utm_source=openai))\n\nWant me to tailor the hook to only capture JSON bodies, skip binary content, or accumulate chunked bodies safely?"}} + + event: response.output_item.done + data: {"type":"response.output_item.done","sequence_number":262,"output_index":15,"item":{"id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","type":"message","status":"completed","content":[{"type":"output_text","annotations":[{"type":"url_citation","end_index":946,"start_index":799,"title":"OpenTelemetry FastAPI Instrumentation — OpenTelemetry Python Contrib documentation","url":"https://opentelemetry-python-contrib.readthedocs.io/en/latest/instrumentation/fastapi/fastapi.html"},{"type":"url_citation","end_index":2582,"start_index":2435,"title":"OpenTelemetry FastAPI Instrumentation — OpenTelemetry Python Contrib documentation","url":"https://opentelemetry-python-contrib.readthedocs.io/en/latest/instrumentation/fastapi/fastapi.html"},{"type":"url_citation","end_index":2896,"start_index":2749,"title":"OpenTelemetry FastAPI Instrumentation — OpenTelemetry Python Contrib documentation","url":"https://opentelemetry-python-contrib.readthedocs.io/en/latest/instrumentation/fastapi/fastapi.html"},{"type":"url_citation","end_index":3518,"start_index":3353,"title":"OpenTelemetry Logging Instrumentation — OpenTelemetry Python Contrib documentation","url":"https://opentelemetry-python-contrib.readthedocs.io/en/latest/instrumentation/logging/logging.html?utm_source=openai"}],"logprobs":[],"text":"Short answer:\n- Default FastAPI/OpenTelemetry instrumentation already records method/route/status.\n- To also include HTTP headers, set the capture-headers env vars.\n- To include request/response bodies, use the FastAPI/ASGI request/response hooks and add the payload to the span yourself (with redaction/size limits).\n\nHow to do it\n\n1) Enable header capture (server side)\n- Choose just the headers you need; avoid sensitive ones or sanitize them.\n\nexport OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SERVER_REQUEST=\"content-type,user-agent\"\nexport OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SERVER_RESPONSE=\"content-type\"\nexport OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SANITIZE_FIELDS=\"authorization,set-cookie\"\n\nThis makes headers appear on spans as http.request.header.* and http.response.header.*. ([opentelemetry-python-contrib.readthedocs.io](https://opentelemetry-python-contrib.readthedocs.io/en/latest/instrumentation/fastapi/fastapi.html))\n\n2) Add hooks to capture request/response bodies\nNote: There isn’t a built-in Python env var to auto-capture HTTP bodies for FastAPI/ASGI. 
Use hooks to look at ASGI receive/send events and attach (truncated) bodies as span attributes.\n\nfrom fastapi import FastAPI\nfrom opentelemetry.trace import Span\nfrom opentelemetry.instrumentation.fastapi import FastAPIInstrumentor\n\nMAX_BYTES = 2048 # keep this small in prod\n\ndef client_request_hook(span: Span, scope: dict, message: dict):\n if span and span.is_recording() and message.get(\"type\") == \"http.request\":\n body = message.get(\"body\") or b\"\"\n if body:\n span.set_attribute(\n \"http.request.body\",\n body[:MAX_BYTES].decode(\"utf-8\", \"replace\"),\n )\n\ndef client_response_hook(span: Span, scope: dict, message: dict):\n if span and span.is_recording() and message.get(\"type\") == \"http.response.body\":\n body = message.get(\"body\") or b\"\"\n if body:\n span.set_attribute(\n \"http.response.body\",\n body[:MAX_BYTES].decode(\"utf-8\", \"replace\"),\n )\n\napp = FastAPI()\nFastAPIInstrumentor.instrument_app(\n app,\n client_request_hook=client_request_hook,\n client_response_hook=client_response_hook,\n)\n\n- The hooks receive the ASGI event dicts: http.request (with body/more_body) and http.response.body. If your bodies can be chunked, you may need to accumulate across calls when message.get(\"more_body\") is True. ([opentelemetry-python-contrib.readthedocs.io](https://opentelemetry-python-contrib.readthedocs.io/en/latest/instrumentation/fastapi/fastapi.html))\n\n3) Be careful with PII and size\n- Always limit size and consider redaction before putting payloads on spans.\n- Use the sanitize env var above for sensitive headers. ([opentelemetry-python-contrib.readthedocs.io](https://opentelemetry-python-contrib.readthedocs.io/en/latest/instrumentation/fastapi/fastapi.html))\n\nOptional: correlate logs with traces\n- If you also want request/response details in logs with trace IDs, enable Python log correlation:\n\nexport OTEL_PYTHON_LOG_CORRELATION=true\n\nor programmatically:\nfrom opentelemetry.instrumentation.logging import LoggingInstrumentor\nLoggingInstrumentor().instrument(set_logging_format=True)\n\nThis injects trace_id/span_id into log records so you can line up logs with the span that carries the HTTP payload attributes. 
([opentelemetry-python-contrib.readthedocs.io](https://opentelemetry-python-contrib.readthedocs.io/en/latest/instrumentation/logging/logging.html?utm_source=openai))\n\nWant me to tailor the hook to only capture JSON bodies, skip binary content, or accumulate chunked bodies safely?"}],"role":"assistant"}} + + event: response.completed + data: {"type":"response.completed","sequence_number":263,"response":{"id":"resp_00e767404995b9950068e647f092888191843d25c5285ef895","object":"response","created_at":1759922160,"status":"completed","background":false,"error":null,"incomplete_details":null,"instructions":null,"max_output_tokens":null,"max_tool_calls":null,"model":"gpt-5-2025-08-07","output":[{"id":"rs_00e767404995b9950068e647f10d8c819187515d1b2517b059","type":"reasoning","encrypted_content":"gAAAAABo5kg_CSDv3y6-9rcajr9ztNF7AK9MAhzPmxGVnKj4L0iSbQJ-Q2asW9GGjj-PqQroo8OJbOexpQb-Y7t1rlh56uJjv6EzdemlPhVEyyUo4cc3UKGYxJTrLsfbgEGAFwfWgokayL-nt-6UULQmzGyc3YJR_CCDJPfEqz-88vaIfc2An_oGEwf7DGiUqM318Xw6K4rY0yu34lHMFAjwk6ux_uQeUcJ0Qat_h10P66peAx1tZq0lxkZX7v5HJKCxOjejslsYl9uownCGFWyPvpk7ZwEMQt5Ww2jc7FF8WZBhPfBkSvPxQGindrz5g_Gye0WfM2Bboh-oEH5RBO4jqsPomBUXKFgLdMnA9ACNJiIbLsFDgKpc00Ro4J10VoC7AlW6z51QUli4FFoR72hG4dzOEJm0zq0Sl0xrldm4Iw4fAoql4A0jB2E_No8cVHxHdjsC3OkQEYuPdl9PMi8ALNfqKlaBhkLgenGOSXnCuD-8GhzRUCeFHFXHG6xyEeWlodZmATtbhHqm0F4tkAgGgeKbXGdbRGy-njBt_oIslXxd-_jYJ4j6ekxbRdox9xpfcebq9xzLquOVG_MomzHYx08Ld-B9sBjl_3CzmzPmV8KnVOQ1CWXLIsChniNBCM56HNfgHcHoh4IRPqVUh4_PtmRpH9FtW5-Dw2dJYuXFOwkJJ-MUzNxdVhBzjQta9yWGOxT-U_VBQdT48wLD1RFx9DOamJ65H1COwIdthnFk7l-l-tCv0_KgfQnzppQJdXVGF2VfujBfXCCNzyVQF_HahwWAS77KQnofUqtev2ahZzvMk8bDKu7q9UOod14s1AoIQx-a_9QO3NYNvAeTpz1yNm6c2-EIznBsAkXOpITmR5zWXQ10NbBjbIjoXVhU0-EIZOLa0JNuJnbEu_AwMKy4el1BMQ2kkHElCCo15DjqDTZ3MouglZQ3sb4zHfUM5pc0nGGk5YrmZbVt3NYgtA9CuCDUibrlQgLluFd9jSzfbgoSNU0F5cQ_WKa5CaR0IujnwUPKpkE5-4KVdlmXeZ_3FKhSMZGclMVAXyl3SC-9I4KyhhtRFb4JoocpNkjhS21O-yahy89q1pe8jdbUSeCvPt3DoHSTLjtuXNLjSmuvtwBDPlOOC-sEVR8_GbzdwVq_Tkugg4wT8ErAuSrov7VenzMPn9JnG-2ttXevHxfO3RsdYlr0VxMhZ4Iid6aFkLOT-qNaVj2ld71AFcUCkqaww50k4skbFRHX1F7vuHRdK1aCTtFFc7JSTlIElHNnpI4ODykTYRjG3bHPeop41rpWTxC1clJdDfdcSosBsFfEukx-7Iu4mr1idxc5fizh4NelAql7n0N3LRIR5t44Ls2D7c0oL1kL42VKChuUsxnXkVbOIuDW3N1GSNyoLk2qHi9AVkkl29ruCgcBxPG7_UCsc0GmSDf_VcRY7qdASOxIvoALZAzhwBBXKsGvjkvoYwznajbjPhaW2qCaZdJzLE7NZneBsw4XzoUJG3BPY_TBmj6sU-7DlwtKO_yuArmXChCsAL-ItF_Hj5ROsdekSDvlxwy1wXFPJF1oDizO9inPYpRk_RhKaMcaSqQQcOOoV81uXKLvrEOHRmBf-MsfQ2-GiX6U5G0IRzgRgtCtl6srWJXMoBkmtETiSo04pc4x5lhD__djzoJi3XlVid6ahAbU_ez11afqeREP09ZFj5UV7UXWVpwwlJdEoQGe-mkmZuHMfYjG3D0_6SYhnqOc-hgu-L9y5qhjh66TyG8JN7OjYFj_VQ9oo4io0JfwiEThHYVycxvJ812XhUgKCyxz4YIzgU63Pf-TN0VOraahE4WWu8mjmbkydFDKtN4qpdBrZekyAqVimK28n0Dmag1T2PTz2IG0QLJisZGd0mqPHI6Re-qbV8fwsrq9XrbvkPc6eMd5L6xt9P7w_IMmGyXN9t9Vr5B77grhGQjqSB20PAcC-FYA1lG7XajweXC9vaIGd69KfsVWwukJko8S8RNyS9XNmQO0JAPdl35iOIn1yBpMMBBRfU59Zvqb7i3TmeyxiitOYJETi5FiggwZ9em9UaO8ojJblY4VuzrshdlPddN3av0os4izexyI5RzPa5DcHk07qKm1Ky8Wwa_g8kmmuGUns1QzXHWTQYestcG_5N0zmawYzwmpKI7LXGMBevcw-VrY3koPGAdyIoZhCMrta1wC3U6Umx-gAbmB7qCv8TNZApxYQbO8vJ4aNiG2WOWQxtVA3jTvC7DDh_gmi2EnG1PHiiTEi1jDeKmnpxW_y61wIY6M0LozLjO_tkVCkFLyBkTzECqXLubuH7__h3tFxKkMYkEq5shJa7A87MM-Tn_RsCkx8EgorBCb--g6EMJ-HRx0YVOdd_WjMrTKZwwykeJZINbVBDXIao0-tLpXP1RBSFSaKOuZMOsmjTqHCLQJGYKxdcYOiXZOZfKY3zVN4nanMbFsohGlLVUDsz12zZCWU_x78X9ypZcuad8RSA8II8cXCXrsKfeHPhKOxKY_o6WtJxCAMxNQrr4flyJA_18zuX9cT8S-kHepBSisDO5Tzzb9rNa99eRs4mC44c-07w90gsaWU3EcjAGfFRQFTaGMqV7yhbgpTGF0Yr0nT-BKdKqpgmdPCgCicAjOd9eFeY6vE6_0osdZ9B0y6I3aEiTSfcBce3VfsImm5ZeZCi_n3Z2adjCv9hnMXP1DoyFxuDD6_rRXBxWVtOpofcPdObOhrwuBBq6jHodev1cL5DTRWIU
PCU2PqKdQvtTjxUP8HBjysN29Fsle7KZGgBcpGboIv8Gz3B2jTRRWdLZKeP5gSGxO9oHkT3YlbDTNNNwQQA0Vy4DkUpHJXdfTugvg8DWUOhnjThe1upVcPHoa9z4e0h9h-2ERp_JCwP9o3oRsJq4_9fLAr9bWXgBbOxDsCdYI2MHGvJ2yxzucaemiNAjC1hNkADJ84KN2RvjnxGCg6KiHVc3xKrxRK6GpSI1l0WiGvUmP0HwGEUjHYU_LNKvvd0vzNzcSdI50BEv5NvElu-HsrHtyG7CYGzJ54tRegrx5_OfYu1_SYXP_YNQDZBik64U1qAscFEtQEDmxoxfu14hIBgwmjY5eA4zDXa44whNjnwSmIHzpMe9NjJdl-LXtbYCGVHx9i2ZAdxPg_JNOKyCDEcPREnvefjnivjkuonFL3W4cgYXWVyRpMmYvZ6G5iLhrAPSuF6SyNhPhBHsQ_aHKGQednxoZmwhPI1ffJPUZowV6cQeEua3g6ageq3hBdKvBiPR5JyfmkV23UEXhz94wjpoiPlDcPdTcqlqBGFMzFLsy62R1iKCCEB2TSzJAOzysWRcK6nYM7XFV5U_WKRJfWh6zGNk3twZv657t01TaNZp6EMNoJPRh-qWKbYNs3DTgjf8uUb5eGWyTUqr1ztUr3547fQhGymPuqYdxX3SKd4kQYUk9cxUVanZc64Qc4czDapb_qzzzIpRv2sQbAFiXYAMhHNOq7XLk12DPuEGhETLCpPUxcRojQ_gUxy6q7mKpO95pkjUvEXLEv_wHnOM3GLTi-4dmulHrPmnfwn0JK_MG5s6BEyENX-Zbnw3641Y-oudnths77KyuBqSbLpge88fO47AEfrUzkDE1WmbxfoeJ_s9AeVutrYpu2Lk6uvdVBQSXNGKkAiiAz-6IHaYXEMCrrfIqw4E6AR1-jVRjFLT4Dh_HHDWv0RgdSPlDvA_3HtCGGKVg0qwaLkwBZ8wO0Jtt7Ca-PdX16UL9DrO6WsvRN4LwAwHCFnwI_BQoY-6S0zKZXqneplRzaeEcHsi-7c_X-8BxZ4oHFkGBBrzEobvo6JWO53gtESnHAcZFvqxsKX5sVc8f160tegs9C7X8WbXxDTCCBWgmDltW-mZeEuRfktvLVYvY2TU5S-eQ_etgObXvL5fbVx8RxWKd56vdmlRhzZVAZsNpkiBkcJssDCy_HjOcTYN9W1EJpMH9VxL2Uemai6O53mW9-8ePx0Lldti-ygHBH0d2zdNO7lUiXcqeFhwRgAtzLfLy9okUptwW8t0KJTHyuyuk1IPnqXQEeYD7GdL8IdiFbjexoLPDx7GfNyMhcJY=","summary":[]},{"id":"ws_00e767404995b9950068e647f909248191bfe8d05eeed67645","type":"web_search_call","status":"completed","action":{"type":"search","query":"OpenTelemetry FastAPI instrumentation capture request and response body"}},{"id":"rs_00e767404995b9950068e647fa69e48191b6f5385a856b2948","type":"reasoning","encrypted_content":"gAAAAABo5kg_Sp1yCm4uugfmhl_J3A7rbLewjS_TZUss6txJPcX4gObsnuMJ4yy85eTHxOopWB4VbGd7XTRIKmgnESjpTO-rmeur1CBN5YRjHmmifJcFAmVF7DrEfnNLzKd7BY492iflfRV9VPlV9J3zYI5ocBMvyM_MFhp8gCHEVfes_sqa28XGrGmYB57irWufiaIJsqwlZ1VUlIxBKRlO8-peh_FJlU7Jg6BeFbOI7dj7POCyxR-D2gHwRLlZegPSLoeFfWHObBOoxW-PFJTqA9awnvv3GGptXNCtaN8jBJlpzYakgUrWqS3Y-5-R3GUGTzGCRqAdew9WlUnCNf7kLKaVLw3VDdY9VMVP580MXMdlxqfNsL9xlieaZaOWLwR7VftD2aLFCuzjPFkoUOt0x6reM7dxrWfx3C8oIT6E046ispJAZk8pqxgwSaYDAfvsVBG98UgZYJICK-oue0B0IjPpwMOv92pkmXhlwdoE7y4G714HtA3V5JzPMD9UP2Rn3HGl541944qZQQjYquTk6F0wEZWKwYZJOAH66-vxG0x_34W7Tr74M7-2_iH4TUIImXEPKB0p9X_M50WvwSFkETEoTf44bOi7EWEBwxmt4RlvGPj-nBN4G3i4Csr9t54y3qcX6NTJu9dDYfBPZnmCraPQvYBvrYdHjEcESZ4jWR2h2A-iFzzr5cBKB6f1yldsxCqx6Kuny6MtfyNZvAKCWPEB9cHhggoEMzQm7lrsnKCp2sLvS484F3S37qy5XMbcGQeFzCdQVlz952510N5IF8gtIpzMFHd9qUHtFo_DqKj-zxK7ioOs2J0PxWFQC7bz4Kqe85fCakhwLUBrJ2UiMtFqB-ENF8MwvIQOk72LRkjnLwQJM8IS_th0Z0HHXqimo1ckS9LcH_CrMlolcRhQPHVFJzNyvhaKqG54fq5YR3CHoExq4AF8zYbepmhN7xotDHhQ_aHzgkSw-0cy10pL02VaE7_olxKq84HBbySvMnG3LXSJnhM0aMxHOM6Ezq_HiAYWZ9ovXyL0eyTXZUAgx-Mj0FrUaZKlHsyGEb0hkFTk_4iV9psCOvjg_y5T0Cw6iUcoQ61d","summary":[]},{"id":"ws_00e767404995b9950068e647fb73c48191b0bdb147c3a0d22c","type":"web_search_call","status":"completed","action":{"type":"search","query":"OTEL_INSTRUMENTATION_HTTP_CAPTURE_BODY 
Python"}},{"id":"rs_00e767404995b9950068e647fd656081919385a27bd1162fcd","type":"reasoning","encrypted_content":"gAAAAABo5kg_kN_dYVYqoCoqiKAqcYBoZoRVg-POK9d8NoGhEBo5xGroENXeq0yu8Je7BaUvZKN_jCgsMmCno-wzFB-kn199MjhmKnaSid3JuqurWRD1SNCene9GQAL3mcGJjr10Vu16WdbVEte_23siMlMZiOp5EQ08u30pkujK1UVoKCNkCvG6xYNVPN_XYgXGr4jABNFCiHtaxpl0FG_6XcVqeWFIRlFC8wVBmyAMQmfvshmgnGlJa6q77ktaYZQeOWtIfPrNrJX1NAYIWzceSOhC2P3kuxEEThdT7nJqxV-xMSiwvOr_GxTrVT2gZVcEJPT14iMcHHLdVGT7QMRp3sMCBZFeIo_3qHCGTV_IRikEH5JIrJ6QKA--FgEaNMHBqyHNLG-O9QW9h2HFO1hvioa2em_ew42INDVQ5GY-CLG6e7qepgoA0sstJuRrD8t9k1bhH7w8tXqDCK77VHSOzxHKapSreYSKDnqXq6qNaT14bxYh639uVeT01QixfDmbZyim31DeBkW6DoI-49mwiGasuBwhWJMDThIwY9R6azSZgGH3-7hnGeGMTcbbIFPXu6XNe7QeKm03xJM-bJO9PANfBVGpr6OLyGjhPPDhFVJPhRwDtLMEEtCtcH0oYXmX1-3duIzq8Fe0TxW6NzcNfa9QC6TnWrp9EriBuweoLfV2O3PrYfjBsqR8-W38tlnvlr1doHJpvnQau6DbU8Cdqi8YcQ9iZc-YB1WCzgiUz14P7uDHDf3pKeW2khRNx2QNInMlwS4Co6ANeDybA4QYEbIzOoWOao3f_YflkQNy9tkQxX-4pscz69MTyOlX47Qu1oqinf5jIBtNM2O4A2h0eFH_bamY3yMup-v8wYDhAcavca-3O2ADRxbNhDYfe1D1jbomBig5OExb0zlsZ-qNWUGZrvgB3Dg7r09I88QXODVpspr5sD1b0PqfFFp1ESiGLLW0P39bcpDKT2XO8VYbv67_VAdlJ-RiInCk1BqbISO9WJOEKej6jUtCkIObMJmWTZ9d-KbsbOlVv5bFyIjVbp09tmxMBrpu71JvTaVfnQn_6Dc-BGdjgewkO589WXYaB55YiuNCkmsCfbfp-sam3DMYG2oN0drVQPUoShjEA66DSaCNoQqRNBIUKqCVUIIpuTQ1jUuw0eVuL0GOFEVBYjtVV9Q_pGTe484ZwSuvOXc0s7fM4f4IcoYUKjqlubcQU4LcBsbN","summary":[]},{"id":"ws_00e767404995b9950068e647fee97c8191919865e0c0a78bba","type":"web_search_call","status":"completed","action":{"type":"search","query":"OTEL_INSTRUMENTATION_HTTP_CAPTURE_BODY opentelemetry python"}},{"id":"rs_00e767404995b9950068e648022d288191a6acb6cff99dafba","type":"reasoning","encrypted_content":"gAAAAABo5kg_k54K0s32OZAU2_Yl-wVjWQMqgM72qY4zVDgmF55Vpagp213AcsdyUqd4C-N9QLt26OyQaOQxDd7fEv1cIpTslnMQUPIwtCUeAHKl5o0dqFphqQqDI7UtLErXrq1mfs-V1jMeXAF8CJZNZpz4KxcnrjAE_j2fHZ4TOq8CuvaByOCvmK_k5fkrpBhCB1BxDJUZIoHFuHuDXG_GejaiMPmYVepcc-X5MqUTeWdK9X7BzMA_Cqg6bqXoZNnXr_s9UCcYR5WW48B1xCfQHF4ZQzIGby23Xj2txIIPa9hT35rzP1LE89xGh00vlOshEwG7GlIa4BOSy7iymhCRGdX4tGCLOuFJaw4wRdBXyTthFxB6DhkHUEzYlp9a1MVqrJvruFUa5pqG_X8FQzF7B_EGk9eiR8uct7e1wcrXpSpEqB_0rwVTg7mtKLj9KR7ytU-D2-fUyQZ1I_yUhjPxozaN4KtcnvJgrwvR7kndNzxYChY0mQ7wznp19NhzHX08_iVjTGyCSdU6IfKkfWahE6_mjqrBNDKQd4Z0B4eyXz6ZkBF4UrcjSfWsVJS3I9kvJ1TOAz-26SAHuWefvFEXMAQpen3UZqwS44z66CR8dFrmmkYS5EP36KhNRWGmGxFcWe2iM2LS6u6QTnir66SHTgugczKfPHYW3-GtYskI1GF2Gmaf4Xzrfa_A3hmxQTYOVBAR4fKfDylPXAuQZ8fdxlBsg3rSKKId8xLp7W6lVyN0qWZF1lROLQKnoSjZGL2om90MpmQsPPYnerhX5HMDQOucEIsz0PlpQEj7EsHjA6b4BJRDlqx1T5dN-GqmyqQLjA7CCo7oIyh6MD_SJvtbRSKMwVEkLQM-UOO8L6o1R2y4q0t_TRze96BKSP_Lz6B9XECg-B7AkAWUaKizRXV7m4yzodu83YdEx7Sc8KMQ6PjNd8xVY-DEzMWGIw1uGF0XDSNyT5YT0qIq2MfaC618LBqRtJXMAdVabKoqvFlrhyUrfCmCQgCXGyQSPPyc8zQ0gpaw7ZTuAGEJmPNLwa-WDPBT14UIs5uaSu4VZ9-fgBKbANgGcSpPdUGwk4lHwGkYzY4NelS4MfsojubmqgKSSszpEuoKTD8bzbdrSEesszfenXL9EfUJIKM93M6YWvKxivwZCFHVJmuP2C1iQae6ypXtXCY3oaUjdN6PI5_6r-Hqmx_9SDArQN_MrkiB-jjohXkKJF34yJqyxlSbP0bisEN0YPap_zMITgiaf6AVlTk1r-PqI7H8l997OvnR89F4C-7WbfZ53JorfmpS3mV-Jmx3wUW7qyZKohRVmIGFtCxIXVSLmj25plxch-Qdw6pQmgPQYyuch2gzziGj5j5Al4N3jV9BCd2-IiRj49v5D05taD82Zf8=","summary":[]},{"id":"ws_00e767404995b9950068e64803f27c81918a39ce50cb8dfbc2","type":"web_search_call","status":"completed","action":{"type":"search","query":"site:github.com open-telemetry/opentelemetry-python-contrib 
OTEL_INSTRUMENTATION_HTTP_CAPTURE_BODY"}},{"id":"rs_00e767404995b9950068e648060b088191974c790f06b8ea8e","type":"reasoning","encrypted_content":"gAAAAABo5kg_3PnkkTC-1Vl-yPxw05UstnGmJBji8GLZs-w_dNuKjCXDNbYPGdLLKdiDxCVtW02Uqczoes6bIFeXiYtO1FV9k0nUcDvggFYPMOaVb4XfKzzJhdEdLyKEUnqzGIXVflvI7sUJ7JNxsgR9H3ywKBbs5AjoPt3HpLEZi1cLRZvbKvceiS44Y2lUhKldl6PdszZmp_LF00K0Bkcbu3UHnCq1lT5MgGboSE1dRJZdpWnUzSG-wcSset2XUkmFAT_I3JhGfYcLXrUN7YxCB2JrtyiExIRBRyePqZDLTmE9IXOv-FMvzIUt0sBTb1KbUPLik64oVIjf2w4wa3H9MohtYilVeavn7ZA-rhE8nobVwunG86yJM-MrkyDuXW5BWiDgxxbz9srIsWrIV_oJMjAxdqijVKhDTQXfbt1xb_gbdfHFBRZZ9OtynF-JoMkwgH87V-xB5a-z1xoa-qWLjgWC8sNgX1QZ54wygIIbJbeKksR06BPMejyLI31ZuiyaVb54mIarpiAuUsBMNALQ2PTRYkJHGtt5O3ZZVD6qZpvFF5oTUKNpu4fYZbiWsXo6vbyBnwv09B50mOajYkQKwBl0HRygSJGA29MePrjSX-gd_TW0vGSFcL_7W2WEDL_o1HJI_1Up5Vj1fhSIIctk3OdEsQ1_G6Z7-xR0NEMY5RHvHc1S6i0jrnhRhz_A7luMP_gpIxAN373q6RkPo0bFbNzmoNL3k3HQ9UDlziz_r-0FV2tDdX5MyWQAaQTMpl9PoSH5yvTfTgVPsEbkSKN9RBTOXpRBGflQAWf9XbJYGV7iZA6TdJBJHY2vus5opufgYg0msZS8MvzKD7YRrPT2d14_bTXWsDT4gjESkaBiwc6razfqXlkdZgpfThntxkyCv5sj8RX08C7noaplaYEgweJhTzHSNmCqBVTALPKF0QJsdvyfar2s1z2ZYh8n0HvXFWIYBFgn_DjuHhcQFxk_Xs13zu0OnuaTXNdN7q0qcpmHjo_RR3n1DpPnNDn--9My8-ZWrEePeWO9SvOZajsqwm-aEtMuyEMPY3O4muTeNXWCP1ge_z9E27dupvGn3m-tNuBqzd09KxTmsWcp9Un1agDWANcfn0ASHXIWbiqOH9qWFNFjP3QfPPO3ly6zxkY4Vhv8vOlAveAgW_nHS3NgQIU_W9fHMynwh5fSKamiJfGuwK3z8pIR0g_ZMyUtUBopgSnN_0DtVwvm0wYcF8-Qrm1W3jpuMGFPMjyBQVKBpO9cuV85RRJtBr7iIAj0OfxCB-UDagn_UjRYEcTOkmYiu1REHSD3aTTNfn-7QAz2oe2kPq_JSAYGfdANjndM6RSqdJu21MVt1nlb5J7OTIvHWYht56Mk_taCzdgS1jQRDA3wC3EEqfrUJMc-UfCY3cnIoDOl5F-LcD5D_b1rhq3UkniCWVMW5lxlc-6rnJyVSlMLIv-HjeRi8IbBb8DZLNLlVvQnVfx-9egN8rZ_PjlLYfUzDefTAWUdQFCIL_1U2d2XhmkUOs7WR2Feq1Tfh9kpw3_vb2tbjF4l2GNwdOZUjtSASL9fsvgR2AL_qRPcPeTo2_ZrxAcQhQsrjbeo27wQbYLWmbKTWpdLEkXYrSBnr1cCnU60tZ_VRs8Fek_pPLX-kfVMfUclEFGcT050ThUm84ziB-XeFLka5e1zWrRg6tbMFrGFy1yqxBZa7XZEIWEIYWHooxi7UVFiyipqFAiQMevPcJwsgIMM50YLL1yl-2VPNO3e0qbVFTvptvQY69sLx-jniWu86sBN6oCEQBXAj58nWDvhhkTN4BUJbeoXP5Kbjq3ETs-GgB0E6Sbwf-som13glZ_qchn2Z4Zv_unuKnnAfiDmbSXKkIoSPFQC1aayI4qVMjaUeRyvC54niGxn1KtrKoPTDLAJ3rW8slBo1_-txy8dRsjdDzpGKd0fuubjWmdikXVd2gtE7KAb5awascJs1y0VaP7kJL1MAeTyl9_tC6OZBltSCAG1OrrqROLUvrtFV16cqsrzldHrI78S7MK68TJ0-h_HLI54B1j3rjsC_IUk5bACpemwwQtANbN1ARXLnzk_mA3C9tuowN6mGV7rRyWxJKD0sVlSrM9v7HqgWgBdiAWzmRVYRDoQ3KHRSNPQNDTSTrpw9MM90rS-DZUumTQDQ-SOq5kdxTVem-G4w42hhIb6KkSsW65kkcZkkqpgon7OJkf3MAqA-MhkDkYsWRtY-W202jIKXmOxcboCqPTnPI9q-YCUBQb-nYgqBNjYH6LBGs7_p7PAgHLFLV57j4Qh7_zMBVX2Q12TN4wgLFxp1nk0Zg-iwBR-f-0An32ujBDvSofYwh7TgZwOUaTRIGRvJljZ9yD4rUg1Hd33ULH2--zvjmySiQLGYFDU-cbVzi1SVZCHJONxB2VgcINcYx59UZROua4GUQGC4PKkVZHFYNmfFEBNX2TU4PcpDKH-c6f3ZQpbHIuTv9CkskxdLDU=","summary":[]},{"id":"ws_00e767404995b9950068e6480ac0888191a7897231e6ca9911","type":"web_search_call","status":"completed","action":{"type":"search"}},{"id":"rs_00e767404995b9950068e6480bbd348191b11aa4762de66297","type":"reasoning","encrypted_content":"gAAAAABo5kg_y4PU_steG8-2WnHp2BjMRNrLizrgTKIRAvGwaOQTvzCFC9x2r_8uPBu7VhWXYk7WSgzy4toG8PUYNjii_eppkdKJbKTxugRIKqHQNlcUqqoV_BUETVH4_gMfydnqWM5xOt94CQTWHHkTYqF09RtfMXORdqeTHMyAP9WjWPGjTmj-NHgOUfuwl9PlkusjPhdvN0_-nJpb-hUVm6UCJ03MnE3aber5Gw3nM6CqjQ--pgaR2WJSohyWikLB60rOQeh55XURNRI6Pt3MQHK9Cmo4Dm7botBWXCdlTS5Zi0-AChluqoBitnS2ScrksTL5RoSU6n3Oje6ySOg3aYVtIjfPtERUk466bqiZolGFoiTyPLn8hvvivB7a_J-DtZGFD_f44XzNLlFbmmM1YrW0fgJQAmXoIrWgJ49e6yTIlJNPVfnU3J3qMc-sJSpN9HOwYU9hvSGoNDjiZ2oy5j6eas8ED97kxbAmLn4j-ewdCG_jflXP8YcAsjB9RHi9ovwLKqJ76sT8rw-E5UlDBa2N0sPTAhY9tMFAlHHygc8lSCRxcOMQUmVtnX9b9LGe6gCwNWoPdbFSHdzDPSHUkyIUWgaPLsYUSmNMP2b8dM5iBPHs5bLyFG2IwT9QjdUu4u9OPCSyEy9Lpgy-pktWGyL-4
m6VJIFvHe2HvMH1rgHcTkiNU8tkPzdT_BySHMqOgFzYeGf-x9ulW2TBLNCpkZmLloeQU6voMRlEYFnJ3g4SWO07Bc3JP2o1WV5sMmJozlO3KHk8h1Y4CpXRYyo4gYzTWJhfxa65Tk7Xth5JbFtW2F4Ufk-FkSi0zsLLhvPqQyky56x1RQd-kCmIPzNFzb3_2kXEySRNRGCyo3G9MzDJUU4ThYrXfXoikuW1Rj-GpOzAU6NTMslEwTXbHc8MOU5fNW7gQ48D20-3MXjobW3_cPZLjOAofWGEyZN5J0_jsEWM8XpRhEXHPcfMbTh-Az-pL8fCvNv3dH-fsj_iC1tTd9f2QtenznAlkMJK1qZoz4z2674zdbltWEZd9iu7Dcbc9br7Z9t9XPuy1XzBKGm4zDeVAnFgOh-2vHBT32BIHXN3qWfP3uWpA5dPBvw_Pr4hLXE6A03_4urXN_bUFEr6P94zN90Rru8J_sOP0x8_HOjkXMCYX4jmEo8MtVqjQRb7uqsbb1Qko8bZdDHUCQtgjnIokngyTjCzxskxUCxA5Y0if1hTctbDbC8KReXtLTlyaEVUAAlbrhwp1Ir20cab1H_DGNxjAz_o4flyHPm0DSBINUjNg1tj0sVPsqUKpwPGl8OdV9XLSjusKxrnUhPf48t25DcfXXo_CX2sZ1n3T8rlLuU5sDQUW9lNpgtMRS3oRPx8lZNXTrIL7SYy0y2CqVFCQfv6iskVYhnL98cli1sGCRt7ZdCU28sW1RCdvyq9AL4N65qPFNP5RzZaf5sO7U9CJSYwNjUbzvPY7YXIvpSvBSKGOqE1twPioyF-c7ZkkU4lPB5k0R8M7wTb7IwxGdX1NiS5FPhrqe7h58gEUIKg4oA09mHr6mZuU-rH6_3CbVIQdedfQPwgArHiN8vTBR73EfrywzPkvlXKxko_43HiQwXXj9ZaTSZKnfkU3WzV-5xjbjIOl3NrjjqFuTitkEvAZ3M4fyW9VQlr966JMKO16GA3te4jQ6yEDlgq0Z7yduXEZ0aflcZkYZYL9M1a5hGqzkb0lPvDPGW8aYjQ4uthcEre","summary":[]},{"id":"ws_00e767404995b9950068e6480e11208191834104e1aaab1148","type":"web_search_call","status":"completed","action":{"type":"search"}},{"id":"rs_00e767404995b9950068e6480f16f08191beaad2936e3d3195","type":"reasoning","encrypted_content":"gAAAAABo5kg_BfyVIs2KCOfcaV48FiJ8R8MAvpEuclA1XLwVGsC8GxGSbNacB9jncfhfvQPgMcEI8_sFCrZheuzRPpAtorOTqu76pvawISbRKLoBs8qzp-IcpCHT3WGamBqBzr1Wq-Ykp6StSreWlzcFkG-WofGPzH_bruRxEFzgfbpfuumlCU4YX6s6mITY0m8JCHX08FE4kSSH3xiUXXyiLC3czwFEAUqOkKhFJ0555hEpSs8sgUMbl7J60R4VtN69ToTsMiPoLhLV3kSyI2KLvUN5Q42iyHobn7QVgi0p76MRJEU4bXyqvVCpDTy_b_HFDRJCrIG1lyvasm-ZbwmL5bUJ-ZchSytzRtMAJF5lh9rt58jihEvi0eaz7DJ6D0bkGpMln5HF1WjIFc-oYmJaAXJov9tPayrTcE3TnlL8VV8Z3qiKXtvbiRBhiaLjJavpxpduN497ijtOnz9QYWLR9RWonLS9TKOkBqm-gwsgWPKpMwfOO4nlUG0OCmlPFXoyVOle5pSkbd0-fDMCkYVLlXzC0-Eth09l5tJemsJNYrmDqfwh44nk3j5HX3pEK1k_aFetF8j3Fgwy7DhQp76MMKZH57k9YBwIe7-vBP9x4KJQpxRvakony1hLS4N0bJakZjaij3A90pOiOgHl4VnguUYyyhPcY4TLPQvpPmCDaGWBbt9RQKriIe2WLD8gJ4xU0pOSN433lPYXYTHH_gAfavfsgrB-gK1rOeXuEvc3nWPOAAduI_MHx-ciPpCuyDeUEqaoDQmyIEcAaZg47SjWmVcb34c-tWfee0uNkLQQSNSLgiFbTerfHmLFf_UjI3LMWCMZrJoPwCZxceaoc0CWx-y1w0T93n9a_wc3tQDtdMGXrFR_5Nl3c4NiMVYj_Lf0wSawgrtiPET7_DJR6SLsKU7DgIpSyJqgi_JaiXtnN54KgtalKtFmdp_zIaBaNBKySj8l8P1Op_PP5Eo-adzdVEXcEH7ewPLFVr57PQHJRySftEox-ab65I4SFE-8eMQhbVp_WGcpMUXUxzBdSF4Id4bCGRfqxUH7a-Wx4ISD2XMfo0zTG-XrC1lAGfId7A-2Sn6th4ycMKNHb3p15dOmaLL1Zkd46VQJ6t0xPZOwB6pKS7rhd3axblSK0A2ijuA5OWstrRpXeu1km3IqRgsqOsxZ4nMLL3DLsD6-5W4wOB8Qt4FSuc30s_tccIeuEr9z70dOolipeKC2axYGKjMpc_TNDJnwBhGuh7GpipnqS6OWZMKmpXOoY7yvkC7y6U_XFovpWa5SHW2MDKf-qOuHxI7djELwGVS419hbHkRku68dqCg1o83CR__k3IpU15Gf4PcXwTU8vhbLAFZ6_eDCF2KZw7PxCNWhH5HnmZ9TwNO9DaBbl3f0VtGaqu6l2DRMT3RVD9BECOPNnctCHzozfyrIpTmMPFZyT8daW5HM2rChrFsz-cpknuj4nfCc65_61SH9fUMsMP0kk6NF9xcRmKDAeo-twGN2rAsWj_06K2QXJzLcXm1JqvNlo1diB1Z5bFnB1FNY8EtbHS7yDKDWVhSGXBz7bfVzQPynTJU0807yUE55bF0CbeFZyendVDLIBS9Zf5_q1JmJNTfMhQHQkdaJqbjrqSi-DFs0g0Z_xpcW8UCfukw=","summary":[]},{"id":"ws_00e767404995b9950068e648118bf88191aa7f804637c45b32","type":"web_search_call","status":"completed","action":{"type":"search","query":"OTEL_PYTHON_LOG_CORRELATION environment 
variable"}},{"id":"rs_00e767404995b9950068e648130f0481918dc71103fbd6a486","type":"reasoning","encrypted_content":"gAAAAABo5kg_rOCWwVycYlebbwzyQ0iLP4kA_eRibpv5pLFDHOfFeX-37mPc_nw3EG1bdco2ABtem4faffR8smLbG6uLSdRdWRo6V3VaVYVMZT7kN-knJLrSgTMPuqg1syrk5MW9GnQs3StCbDINGIjpFpCcrkT_wE3JhTUrFLPojMHEvSK24JvcYMKQr7t-zfS9eLitx5edQOFlULxpofOwAaTbIoGTu2dOA0BZL8LjXo2-7janXWcpIZQij3YQh4IIzvBdRHv0fh9LPlxWgr2QKdlcqA8imrBSc5EeBYWcj9uqhMtcItqGmrqyqYElBEFkouFDFTgwBvWh15Lo4oKALDQWATVYIBbx9VAHfMy2W8HVYz3B5hcVWSltXGSi63QGEdrsxqgVwntUKUhCH0r8Mv9MHY17sWquro1Ygv-hZiN-4F3SJ1E-ftaCtJ0OGE4uDV5kzYoJOW30jsym-A-eG_kj6SVV1qrmeiFRlVbcwSK8pWxux7bymMUjgsnMvEc8Ty1Ar3kVsosUG8iGqHBzwl_m5oOl1FCUjOLwV7PDMhFDKX8TFpD2OA913b9q1--RY_UfXiBvV29KZAzqQJpnP_fTqsteRujS3ocdwXa2yxB3pUnXXJnzf-rXAm3oAN3dYKXnXRj_EklNdXCPsdhGgnHevnBl3xK-YTLuMuRyTFUmCkc0cDYRic7ke9N_nsBcNHjY5JDMi_R2GVOhMlmu9BUwPqcOlzlkU31jadZIlfNTr_P-B8mxEdPhW0z4hS_vsHaay-fgNobzX0gn5KL2tF6rOEiajfPpkRiynJFd4JxWEaKLqbsoWTvqH3dI4_wLCQMB_cShgkdMUylj6q6HUcu5xjsrpTLld4oOxRVxYIky6lhT4B2AOcwB3LqJeMO9LkyfagfZZyc96obybLe4uVNtMwCoxBkI0IpGoJJKsckg0FXCDBSWcLMmyoGkpUXI4GD_gHih6wj51nD9MsBn-RE1MbaL0wQ3vyVBl_5P8qVO4l3OzK8CrtyIohzDrNKWtLBSFuXlaYgtLq9joQzULOhD6qWym-dyjXpB5bWy6ei9Bp8ao8Mxwn4EPmYwsTIRQYhBN3Ys5bImolsz0NcRYOBequG2YUNFgO_6S3tuzoWeKH45ALb1UWJY_a2_8z3GT_kygDm-4v71QS03TBQFIaTYaQqQXZnNwZ9FCWNfSyIKfFbtWY_dSsKePozwy6baq7pQRc6DAnzlNpMRGyDqCq-KufK8Jvg3Ox_CrWAdKH110hAke7BckIR3nd_QaKRvBllGDHNnyFUvAmwW5mesEb13eaxAhvDIt9DuUmQp1bWWOhKj2ApdhXfVpp5wWUUvCzVSVHrN6auqA7tf_kHXmWcmDiwkNcRtEwug5HngUtdv9qqzDbamIxPwu8djSTFmz7v03DyWC4a1P9NBhD75nQ4lVnVw4wBT6s_zQec1RlkXPLxf-75w-oQ-LDpog-bdid2IRlsXN9xQNm2j7G37AZaCx3pTnpVT5XJ_jh3V5QALDZDuPWI7U_8rI-MM6hkC3nhF2f57cCoVVWSh97IJsiV5pFZaq9iYCQpR67N2Q0TT8bDoHS2_13ihrRyjMsgzhcl3W7wgY-zrEb8r9bA1zU4aclJZ4tEQn3_2QjSEKKSVRySg6MsStIt6EC7MS2SKzFbQhM6oqWPhcMhyLUHcVQT_ZUzOb-UOPIApBdGTipEXu9zTQBdruwvZ_nSjWX8kLsG7iTLol3GDXnsCngYLX-rcZc0-WpIT9lsxEx76Ha9GHk4vVaxPoSz9erAP2J1Ll_C0r3vR1Wg3mK-0hPWPGCfYp490f73N7ZujReQTRtnGFWTPT4R2B9CZioI5o9MmpJwsg5TNZ0Nt66KPh3jSl6pKM090Orft20xo8j-TKygOOTSoO_UD4Hk1TBm9EDeD457-zIVCxGXwAtLvmNcBUBs8UpTOyxAeWc35GxP3ceUmGPPjqL1KLNFzrn8e8ujhfRlwwTXOMJMMgdt1tJO3OC8irJLeD2bjuw75TXaI8RiWLKgei902UH1D6SDYE8SgKEQHeD4eCbyptrI7jUVFLnTGn5pzev1Dg-Yaoil4HcqhSy4k5QNQwLg1oHc5XJLh_Y_z6OvHswzLNKBsZhivWlcG45yosnLajunXgoPIy-A9gylwkZkZ8dcc0czWdl3nNo-P7IaPtt1B2ExxCYzTIspFTcTD9TCzytwE4e_8YepJreiKsU2NcQFlPqYFKlr22GN-Z0BhY8oH5-LWgOCBiBhuKFZoXZaFPOqK8657oV69odXLa-5xTmy8mWdHvYA2Td2D0X_awYUnOhqOdrhalS9djCyd7UREVzwDcgwgpjAW9LfmY8182atvkhwaJ0deTWmX-iR1r0FB-iGTa-BgLkjnzcPiW_v9qCD6k7ch2Ng7-aBl-4mueNp6x63_wBpab75V0tKwdjcQA-WBsLULCl3Oj2i9q5yq5kaqD6Q78n4vvKPctwKHmaH-aPhEQn0GS200dkkxzRKHybnB_ZdJumQGg-5RS6cf1p7VTW6LAVUupoMiZ6mYhs12siSZARYXzadWlxq1THqw7p7BsqUWmXNOrJD4h2fUz5NIUeXHOnYW7Fr1AtlZyj0BAru8PwJ1zfAw2vECVRQZEAUoRG11fnaS2s2NcLLuuAJdfbQwytgBMI-d9FvegcAwZsDHa-b2kCB_2bCSrCPUzDwL9ObWUkYTd2A_F1KDq8rxVd7lvx_ZW0KGt57IZJVHSzOFsXm8khGrBxeyB8IPkNIFtNmBqCTu1mHiBps4lihvvbsIecEZdKynrlslSN3sNW8u3sCpcpekgDt171H6LSXpsHlnuNAqGnuPMdQSSLByLHo-PnzQiHXU1nlqSpNglHd9ysh3UCUZDJsV6qDPZglrbAnOtQeRzfsl-0OLjAKqclB8UxUysLKR8eZNepNBUpidprDrv-SVTGzCBqOrTad2cmP1qn-YAEdbBhMUJpfKquMx3XhXW8BZKAtP_iXxD689HhXM3zEv2Hmzw9TEZLkn059eo7ADsd8DzClpqDDlXCyfzu5L8KoCVNgAi1b4MmSToj1EegOvPQLDVttHZ0siV-nOBmuAFFti_2uKgf-hHWABI6lhqCqEAM_VHw7mITsSPhg2ewVxKDEBKNPIRlkA1j8aTQ11TqsrxuK642DxwV5lOvC0729OZghzAewTb64ZiZTa2lQqoPOgF7eok2pK8x1ra1A3w-tD5s58sSoWN7GrMFHK9Q74OTrCNWpeQP3CFNtJ-BWIdgfNc8BGSbgB_DugTFriucIuUyzpFX8CPDT8P9SZmENpy_bQeOES6AtJ8RdGX6zCiaUQYLUok8OTfzIc_2etib4LXY5ywyZLNvm03nfRtL1WolSW-HaILqg-AhPfJIivCo9pWyN-aMQKbeviOOwDLdm905eueShCFheT6Se2_
jvzZiNIbSd_BUDvarvRF5HM-XEL39HUthreUUSAa4vnIYgxF7Vk3wXiwyYv8u81Vli2BrFqxzJbuqNT-PVuf5ZHsoGRMKSve7bpr1Rpoy6TJ_a8c26FoxtcmR6cMHQszUR7WxA2nfEGWs0--929wVIqbp61saOgIDs0O39FZRtPZ5aBWDazf1zzlAczDLY6Dtehp65rMRTaFELgvDLcwFltYb7fZGYMHUq8vFWUnDHJevAGCviquy5JUhqThQsci_wEAOIP-OohY9Z_j0hqRCN7hsKz99l96qEiNtpJ85J9kJ2AGoc6UaWJDUPIS6uOjdNhtS4NyjChCkYPERzzrIbozDTMhWRA_PziOWAaNTNgVF22YQqMA6SIw_Otpq5ezcbS_SgbexeyrJ-warYIwRA4kX1T40TiSdC31cu8dHIVDnkx_fHTLuTNThtQUjWq6U2W9xOMEcZiJn63u23KqUimrKIcYjNxKdlujM-BvDJmrYkpYFV_zowZZ1RPVR2jRFZTBMB-d-taSCIgToBzupM9utIVUdEEc5xP0yqflftaHhnKMnVIRCGHwaeLGeKmJtIjtYFk7ZOzsas-4NHSeBUfw4Fz7wQRlhogfADjWeTNqm8512Z3nFPDS-5uzbARKfhzCZHNoxFKRvuFKApBBTQ-V-OI4sUKeW9wagVoRs5Smsc-x6EU4LBrTLStFPEiMmxvTud0LGZrQypSUrZT_sWkccUNrF2_8D7bgEhUzOYHqmTKyJQhOWXlmKvhioX5AI3tr_cbvRJSPSAx6KfoIFKKaGzvcSnw6iOvNd9i8YmIVWdFC-8GjMNE7k6D7XZel9E7ks50Daf1j2vuMAB12UEInT3LZPv1-IbkimxWwmhKB9vdJSTbNeuy6EN0KoPW1AGnF09G2DXQip7EOqEAuS_Blk4KKvQhOJyfD7QtQ7_cPaZOeMykF9OY-luiTGq4a_MGA220v-sOjn3HJPV7iRuyH4EBn67ZO7HUPTHWociFOjHojUU2RZL2UtKn4uEiWcPBKPDeLK5uED9s-OzKDiYXFj5TylR2yGc5o1ye3VoxrewdN5t7QXcxVnuUq14JeMADb2Dd5Le1nKrbPEo10pJ5KC-Z8lBQ2-EnN68so4YebxODiRwy4FPkKIAeLJd9Wuhihbu8sKujVY5L71BGIgmJ7gaXZIy5mk_55X6-IUa_qPnj9njTgd902aSuJcLwEH8OBuzsnkFIlySWvhA7SOHeXsEyqkahueiSMm7E8OGeGJ7h3NynOMRKWGFrkle2Fm37nrUkIVDUfGeXsDcNTphGrEQD9tvJkJf1CIy1LLXJPtUJc8ULAsPjgRAuXByjNeLcQrRVXPJHz8cQLi65tAtTo_l1i1Q5J4gvIuDINBBRtr2erulw1m8c4E7EtwSslAXzXxgSX0E866RP7JhGTjTqnlZaJxAujH04UvS9ImwJwJkcH7TVnoG1qTxT00I5SM6zjiGJdSzuCpHyK_92sgC_O-qSBJsMQ24eX00yKuQ-5kjbjxbpNzaOR015RUORTpY3xisYtB7FlTna-jG3CvI8TGAq4397gmdvwmw2LP8x5qVuwNbYgJL4LrBRE8F6bYNMNhZnHUJ7kbET176vBUvAntm4OamoH9J5ZsQm6J9UdYl3DgrJTRfBbDvZXMcvpvwK_LOCe7TB6PUOUt7pGiGSIXd3Cb83yHlds9uQx1YD_hT4vN4aKZceIWzLwnK5IVSzIWo2iHuhlqatpuTEKeBejxpb4xidhpLtn-ypXb1GCwCTqjbFlTweVCMcXeMMMSyRR1W2Knl_CDNWN5lvJYe5SCTWwPZ0J-aSmNj1pMtAFL2qDJRatmh9s7lMpMvcairMB5DW7CREWt2QrqPHaTUNOVY-a2TC43muI60MIiO-r8TVz2NtW5Lrj4lhkI4cRjbidyR71NgD452REDrp8Yg83b5e8IhHGvF_0Qwr_ao9d9Vj_NDHd2hST-Og-aSogpYbvVZkC835krUmYi5eTcxceldRQsSRi2NsHcesbvHZeu4STzhR0QdnYEoE7eDOBpjRLHvsJo-x1L2bIUAZUU-PeWQui9Hj_uVkJDvKhIde15VNlWLu62JcZfBCG2i0c6K-40b25Q7l2r52X4Mdb5itN6gOhg9TqJSKr8qNMmruIPUmshOzyJKpdm2rimw5pXjbee3PmYkIftFGct7fYc9WHIFLXhOBhX4RAPMU2PohNpWyiF3scOMQH7TZ-YOfejnjpF9CgppXCSy56oZwnfe8hZM13uLqbwQYBrVA1J0RThqBZ_0-m8P5V0lpnztH02TNi1QFNWGimQWVeuCYluPR0SPscyd2cH1lsINITVBXRZCN4gacGoq5APPObOVL20wrhrj0yE9POHPmNgmod6S6fxLfpqx4hRVNj9lOor-pJtR4NpSCiJJwC4tW6nVICvGq6Zu4Zl_uvxqQ3VisdxdjvWyfj1n7qfqxc-K8WiCJxUusFSscBDcA2AOgl_GbsXm6JAMqWHdV3EQVAQxhjn3K3wk2Ob1TOA8_IvzB-D_xLNMUUVfMjNTq3amyY4JX7AFrey8FtJ2YzGURJr2KDzk3QbLSNzgqE7w6BxgLf6m4W2G3d63Fg-q5GaxYZjNcFQ70WXu6M8B7CliMYUi-CHiyNe0U_wloGoxNHC__XrL-btq-4scbRiDtdZSngRoM89N0s-hGObXuKb6oVauH0QA5HODqXHYXHgYMKW_Aa26yHQqIvBdd4XQ-aKJWm27OdtdmuA8FAhXirmlTTCo4jJKMo6dn5mgpHOLya4nG98eoH0Wj9MutktX2hA9wCv0Nm-QvItws6E6oYj_Djf1ZjzFleurXwvSAbHqlARaJfnFHE9RswT3ukfXHpjh-37O6muNMnlUeIRYKy0GTScHHZf5MgRmEFo3iRiJ7ZPfsAhJoRJLHsmP2_DTchGmTE7S6VT6KAm2Onrb1LaKIGGIlI30RMs8M2r_YDOWwjXqfUd3KYlNELtpx5rv9qzahOhd7SOEWWwpf19sVU2NNK4YW_VhGyOMeufdWk5W4iwvvGbjjoFycoFGNIjMPf9e2OeTsgkaZbWVufbwjZjfuU-xQ7SVfroo8bqybAEl5evHnBDKCFiuhufPQT5Jy2dUdVMDNIFb9nLgYNYkRhTtt_gtqLyLps2d_prBDy-AnickpdnSOrCRBxNjvbDV_FRWhIjiGtd97pXqtBUejDsQwZ2ALv052F-BUeYODreLFzwKlKPD3sPDVcaoOuXZpsnDbznwqI30lpBvYo3kq3q2eKky-GWOUlUUJ0fjo5sZm_XVu6oFOksz-kwArnG7YkJzeW5ItoffFhdPf_RkNJeuNx4tzQOtYuiZ0ARxiS6Ms8AJVyXLvGr6CKMqFYdZlQd8vr6RoFYsp4jK7CC8NBcA9gOAC9aoV8ta-4Wm5PEW9wCRBizjlwMQwowsDYjRb4wgJWDECeoe45znYTAKWnM0QxbbpCETnMqpCJP6gVfDmi4xFBK8Gk7CpKfYzukEMCPXTyX-xfJVim-DbxnttxVHvpRtzjNBL_7ipIR9Ikhel
jc7NmThosHnj1SksFD2mmIee8cYTiJY3_0cd2BzF8S8y9uB1q8ZrJkfD3WfZ0JNhz4J019vU_iBKqopV5CLazwBQrahYMae1jiHAeCy3_ToDVl59gWyCEK4FraAWlldkBwYZ5-Oa6TtTYNsrR8kxAW-XkTnMU-72j-iYf1ageSE3GcOEak6RTi1o8kB00t4NCRy8OJF-tDM3D-coHGrRSXUDdTqqWNWfDwvGp-I7_2X3whAWWA5Q7lHOVJv6DAvlbJ7RMFnHHphCQlxhnLhWOzP7Ok9r4Nl9KAGO7BTi_EXjQ-DebpDeCju8KD-GiD5w6WOSLcJCDed-uS1QHozHnt0cm2ugWwQeBAJAnrNBA6yBsH6wsu-Vdk6Ix70T00h00zKNwJyqRgG7Zca27q9nI4PIYRciXI8X8F6HpzoRVg7iKJpMMPejtF3ZL5tWG9pYbUysPWSyfWpwZKxZKtV32htE_JAtHi6PnvneirgNaNSjHcmJeMjJPk_Z9z0rUtiFpWoHarXJQpyyi5W8qIeqJXWzjwua6FDRc7OMOtGRQ-slqXvuHXOnQFBX4DdJngcmQ9cZw_KzOEcZynZNVw_czjxMXKhCwQiZJou-liq2ImLQIkiDx2GCKy6EUTlp8GGoX-VF2-hQIleLmC1Fy3ZrXR_4IEmLx64vD0pAUfGO_FRyj5nlDbxIwtxghQI3Mo2bA6ePwWkUEVTVLLepeC2s5L2Rvf4KQ8ieQ5e81FWE-lZUhWGNar0GCJuRvevLpqSGfSFi9LMxvSZfA7EBwCKQJ0jrL2Z-wsfotqa0DhfOm0iJ2qxOijclgJ4H8mfJJrODkoHcJ8CMroB88KoS7PA0q58Nh2vktlRe7gdwO1CMZ-XO7ZQ7HMSiUhTspqekt2C727tyBt9_Nw-Mq1yyV8EQ1y5K4gCVTCPeCZEamditUq8a1VFCjRDAwreRWw0DMIHEYp2Kz8Qcfph9KGwDhTSvR1hOlYOmujb-kztUC20OCchX_iuE5U_dX9zo4xDq49w-c9tKORi8pfBXXb0WCf6yBDrLadazmNPFPlJd1eWnLFgateb_p0DVTIM9_JyTenqxZU1DPwjxIvO9MEXK3jjgC7tJoYOM3olvLgfy3OKv7S3O3vuGscBA3BKR0ndZ46q-x9GiEachS12NOBajlZ5kDE_7J3oCA7jXiT2O61Nz8Oxx7fZnKh30F8FIegvkwJcH_BkZFuXR3rscSkhq0jpUNPDfsnTYd54ALUXdgzOs9i16_X9RXJvcYcSjD5tODZqcDbcb1R-ce53khutjhuT8aGmm40ZN5kCjz9KwKJQSFnr85aErVEIeGYmuyvS7hnNT025zKL2pJ0Ow4EnMzbOQ4NhK5SnuXNLwwjd8GpcLRmlGEMZj_SW8GTP-hjrG1KyrchFFvEkgtfWqlfOnwRNowy7AbDOUJnIa8_9Dali2hm7huliL4Bl6m9PXB2y8tYnHDjW0PGMr8C51BL_84En0yAnvHGoohAop0T1UWaWQXG4kO9t5m0bUpmKbKdWL0ynfS7sZk4mqD1_4sR_r1G46QXYhw-GkNCPt9dmlys-v61lMYERe8b19cYKnn759INiY-rEMqKbelA6Wu00DtG7vIRSPEc_ktATgXTvibEL_OKHy_eUhmsvBUvVYihvh7j6iOSikKMsSpueDTWHdGQXiE-t_Et3fGTYsLpoB1cI_CMOBl8TCvULsCz6H90uYxHNDMryCE4UlYiP-tdXTPst302x7bgYY39qj8y9bRh5Lrj5F5r2t2YP0AHBSYtIEpQcYrvt3yoHCqVUMa-G_ugc2wP0QoPUrYGUyNMfH3qVUNCdvuHN5QDamUj9KK-_BQ5VsXMDDhYH-wx9FxIewv-FlhlGNu42uezZbZsjFIbOzH2PFzfjIcsShuyacmGGmWT9hz7QL3Kk2RokBNCumiX6jlrpkESpuGFwstBPGXOofpJGd4Vf_9o62YnoOcP0mNjs1FHeW8NMA6mwo_NAArMlmG-fLMyB0Ziwuc6XBKQtVxEjT2Z689V8lxTzjVdkTIswWEyR4kOVyC2AtFg==","summary":[]},{"id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","type":"message","status":"completed","content":[{"type":"output_text","annotations":[{"type":"url_citation","end_index":946,"start_index":799,"title":"OpenTelemetry FastAPI Instrumentation — OpenTelemetry Python Contrib documentation","url":"https://opentelemetry-python-contrib.readthedocs.io/en/latest/instrumentation/fastapi/fastapi.html"},{"type":"url_citation","end_index":2582,"start_index":2435,"title":"OpenTelemetry FastAPI Instrumentation — OpenTelemetry Python Contrib documentation","url":"https://opentelemetry-python-contrib.readthedocs.io/en/latest/instrumentation/fastapi/fastapi.html"},{"type":"url_citation","end_index":2896,"start_index":2749,"title":"OpenTelemetry FastAPI Instrumentation — OpenTelemetry Python Contrib documentation","url":"https://opentelemetry-python-contrib.readthedocs.io/en/latest/instrumentation/fastapi/fastapi.html"},{"type":"url_citation","end_index":3518,"start_index":3353,"title":"OpenTelemetry Logging Instrumentation — OpenTelemetry Python Contrib documentation","url":"https://opentelemetry-python-contrib.readthedocs.io/en/latest/instrumentation/logging/logging.html?utm_source=openai"}],"logprobs":[],"text":"Short answer:\n- Default FastAPI/OpenTelemetry instrumentation already records method/route/status.\n- To also include HTTP headers, set the capture-headers env vars.\n- To include request/response bodies, use the FastAPI/ASGI request/response 
hooks and add the payload to the span yourself (with redaction/size limits).\n\nHow to do it\n\n1) Enable header capture (server side)\n- Choose just the headers you need; avoid sensitive ones or sanitize them.\n\nexport OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SERVER_REQUEST=\"content-type,user-agent\"\nexport OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SERVER_RESPONSE=\"content-type\"\nexport OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SANITIZE_FIELDS=\"authorization,set-cookie\"\n\nThis makes headers appear on spans as http.request.header.* and http.response.header.*. ([opentelemetry-python-contrib.readthedocs.io](https://opentelemetry-python-contrib.readthedocs.io/en/latest/instrumentation/fastapi/fastapi.html))\n\n2) Add hooks to capture request/response bodies\nNote: There isn’t a built-in Python env var to auto-capture HTTP bodies for FastAPI/ASGI. Use hooks to look at ASGI receive/send events and attach (truncated) bodies as span attributes.\n\nfrom fastapi import FastAPI\nfrom opentelemetry.trace import Span\nfrom opentelemetry.instrumentation.fastapi import FastAPIInstrumentor\n\nMAX_BYTES = 2048 # keep this small in prod\n\ndef client_request_hook(span: Span, scope: dict, message: dict):\n if span and span.is_recording() and message.get(\"type\") == \"http.request\":\n body = message.get(\"body\") or b\"\"\n if body:\n span.set_attribute(\n \"http.request.body\",\n body[:MAX_BYTES].decode(\"utf-8\", \"replace\"),\n )\n\ndef client_response_hook(span: Span, scope: dict, message: dict):\n if span and span.is_recording() and message.get(\"type\") == \"http.response.body\":\n body = message.get(\"body\") or b\"\"\n if body:\n span.set_attribute(\n \"http.response.body\",\n body[:MAX_BYTES].decode(\"utf-8\", \"replace\"),\n )\n\napp = FastAPI()\nFastAPIInstrumentor.instrument_app(\n app,\n client_request_hook=client_request_hook,\n client_response_hook=client_response_hook,\n)\n\n- The hooks receive the ASGI event dicts: http.request (with body/more_body) and http.response.body. If your bodies can be chunked, you may need to accumulate across calls when message.get(\"more_body\") is True. ([opentelemetry-python-contrib.readthedocs.io](https://opentelemetry-python-contrib.readthedocs.io/en/latest/instrumentation/fastapi/fastapi.html))\n\n3) Be careful with PII and size\n- Always limit size and consider redaction before putting payloads on spans.\n- Use the sanitize env var above for sensitive headers. ([opentelemetry-python-contrib.readthedocs.io](https://opentelemetry-python-contrib.readthedocs.io/en/latest/instrumentation/fastapi/fastapi.html))\n\nOptional: correlate logs with traces\n- If you also want request/response details in logs with trace IDs, enable Python log correlation:\n\nexport OTEL_PYTHON_LOG_CORRELATION=true\n\nor programmatically:\nfrom opentelemetry.instrumentation.logging import LoggingInstrumentor\nLoggingInstrumentor().instrument(set_logging_format=True)\n\nThis injects trace_id/span_id into log records so you can line up logs with the span that carries the HTTP payload attributes. 
([opentelemetry-python-contrib.readthedocs.io](https://opentelemetry-python-contrib.readthedocs.io/en/latest/instrumentation/logging/logging.html?utm_source=openai))\n\nWant me to tailor the hook to only capture JSON bodies, skip binary content, or accumulate chunked bodies safely?"}],"role":"assistant"}],"parallel_tool_calls":true,"previous_response_id":null,"prompt_cache_key":null,"reasoning":{"effort":"medium","summary":null},"safety_identifier":null,"service_tier":"default","store":true,"temperature":1.0,"text":{"format":{"type":"text"},"verbosity":"medium"},"tool_choice":"auto","tools":[{"type":"web_search","filters":null,"search_context_size":"medium","user_location":{"type":"approximate","city":null,"country":"US","region":null,"timezone":null}}],"top_logprobs":0,"top_p":1.0,"truncation":"disabled","usage":{"input_tokens":33151,"input_tokens_details":{"cached_tokens":4352},"output_tokens":3367,"output_tokens_details":{"reasoning_tokens":2624},"total_tokens":36518},"user":null,"metadata":{}}} + + headers: + alt-svc: + - h3=":443"; ma=86400 + connection: + - keep-alive + content-type: + - text/event-stream; charset=utf-8 + openai-organization: + - pydantic-28gund + openai-processing-ms: + - '86' + openai-project: + - proj_dKobscVY9YJxeEaDJen54e3d + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=31536000; includeSubDomains; preload + transfer-encoding: + - chunked + status: + code: 200 + message: OK +version: 1 +... diff --git a/tests/test_ag_ui.py b/tests/test_ag_ui.py index fcd0fea9c5..cf321274b7 100644 --- a/tests/test_ag_ui.py +++ b/tests/test_ag_ui.py @@ -1,6 +1,5 @@ """Tests for AG-UI implementation.""" -# pyright: reportPossiblyUnboundVariable=none from __future__ import annotations import contextlib diff --git a/tests/test_vercel_ai.py b/tests/test_vercel_ai.py new file mode 100644 index 0000000000..9ac0fdc908 --- /dev/null +++ b/tests/test_vercel_ai.py @@ -0,0 +1,820 @@ +from __future__ import annotations + +import pytest +from inline_snapshot import snapshot + +from pydantic_ai import Agent +from pydantic_ai.builtin_tools import WebSearchTool +from pydantic_ai.models.openai import OpenAIResponsesModel +from pydantic_ai.providers.openai import OpenAIProvider +from pydantic_ai.vercel_ai.request_types import ( + SubmitMessage, + TextUIPart, + ToolOutputAvailablePart, + UIMessage, +) +from pydantic_ai.vercel_ai.response_types import ( + FinishChunk, + ReasoningDeltaChunk, + ReasoningStartChunk, + TextDeltaChunk, + TextStartChunk, + ToolInputDeltaChunk, + ToolInputStartChunk, + ToolOutputAvailableChunk, +) +from pydantic_ai.vercel_ai.starlette import DoneChunk, StarletteChat + +from .conftest import IsStr + +pytestmark = [ + pytest.mark.anyio, + pytest.mark.vcr, + pytest.mark.filterwarnings( + 'ignore:`BuiltinToolCallEvent` is deprecated, look for `PartStartEvent` and `PartDeltaEvent` with `BuiltinToolCallPart` instead.:DeprecationWarning' + ), + pytest.mark.filterwarnings( + 'ignore:`BuiltinToolResultEvent` is deprecated, look for `PartStartEvent` and `PartDeltaEvent` with `BuiltinToolReturnPart` instead.:DeprecationWarning' + ), +] + + +async def test_run(allow_model_requests: None, openai_api_key: str): + model = OpenAIResponsesModel('gpt-5', provider=OpenAIProvider(api_key=openai_api_key)) + agent = Agent(model=model, builtin_tools=[WebSearchTool()]) + chat = StarletteChat(agent) + + data = SubmitMessage( + trigger='submit-message', + id='bvQXcnrJ4OA2iRKU', + messages=[ + UIMessage( + id='BeuwNtYIjJuniHbR', + role='user', + metadata=None, + parts=[ + 
TextUIPart( + type='text', + text="""Use a tool + + """, + state=None, + provider_metadata=None, + ) + ], + ), + UIMessage( + id='bylfKVeyoR901rax', + role='assistant', + metadata=None, + parts=[ + TextUIPart( + type='text', + text='''I\'d be happy to help you use a tool! However, I need more information about what you\'d like to do. I have access to tools for searching and retrieving documentation for two products: + + 1. **Pydantic AI** (pydantic-ai) - an open source agent framework library + 2. **Pydantic Logfire** (logfire) - an observability platform + + I can help you with: + - Searching the documentation for specific topics or questions + - Getting the table of contents to see what documentation is available + - Retrieving specific documentation files + + What would you like to learn about or search for? Please let me know: + - Which product you\'re interested in (Pydantic AI or Logfire) + - What specific topic, feature, or question you have + + For example, you could ask something like "How do I get started with Pydantic AI?" or "Show me the table of contents for Logfire documentation."''', + state='streaming', + provider_metadata=None, + ) + ], + ), + UIMessage( + id='MTdh4Ie641kDuIRh', + role='user', + metadata=None, + parts=[TextUIPart(type='text', text='Give me the ToCs', state=None, provider_metadata=None)], + ), + UIMessage( + id='3XlOBgFwaf7GsS4l', + role='assistant', + metadata=None, + parts=[ + TextUIPart( + type='text', + text="I'll get the table of contents for both repositories.", + state='streaming', + provider_metadata=None, + ), + ToolOutputAvailablePart( + type='tool-get_table_of_contents', + tool_call_id='toolu_01XX3rjFfG77h3KCbVHoYJMQ', + state='output-available', + input={'repo': 'pydantic-ai'}, + output="[Scrubbed due to 'API Key']", + provider_executed=None, + call_provider_metadata=None, + preliminary=None, + ), + ToolOutputAvailablePart( + type='tool-get_table_of_contents', + tool_call_id='toolu_01W2yGpGQcMx7pXV2zZ4sz9g', + state='output-available', + input={'repo': 'logfire'}, + output="[Scrubbed due to 'Auth']", + provider_executed=None, + call_provider_metadata=None, + preliminary=None, + ), + TextUIPart( + type='text', + text="""Here are the Table of Contents for both repositories:... 
Both products are designed to work together - Pydantic AI for building AI agents and Logfire for observing and monitoring them in production.""", + state='streaming', + provider_metadata=None, + ), + ], + ), + UIMessage( + id='QVypsUU4swQ1Loxq', + role='user', + metadata=None, + parts=[ + TextUIPart( + type='text', + text='How do I get FastAPI instrumentation to include the HTTP request and response', + state=None, + provider_metadata=None, + ) + ], + ), + ], + ) + + events = [event async for event in chat.run(data, None)] + assert events == snapshot( + [ + ReasoningStartChunk(id='d775971d84c848228275a25a097b6409'), + ReasoningDeltaChunk(id='d775971d84c848228275a25a097b6409', delta=''), + ToolInputStartChunk( + tool_call_id='ws_00e767404995b9950068e647f909248191bfe8d05eeed67645', tool_name='web_search' + ), + ToolInputDeltaChunk( + tool_call_id='ws_00e767404995b9950068e647f909248191bfe8d05eeed67645', + input_text_delta='{"query":"OpenTelemetry FastAPI instrumentation capture request and response body","type":"search"}', + ), + ToolOutputAvailableChunk( + tool_call_id='ws_00e767404995b9950068e647f909248191bfe8d05eeed67645', output={'status': 'completed'} + ), + ReasoningStartChunk(id='d775971d84c848228275a25a097b6409'), + ReasoningDeltaChunk(id='d775971d84c848228275a25a097b6409', delta=''), + ToolInputStartChunk( + tool_call_id='ws_00e767404995b9950068e647fb73c48191b0bdb147c3a0d22c', tool_name='web_search' + ), + ToolInputDeltaChunk( + tool_call_id='ws_00e767404995b9950068e647fb73c48191b0bdb147c3a0d22c', + input_text_delta='{"query":"OTEL_INSTRUMENTATION_HTTP_CAPTURE_BODY Python","type":"search"}', + ), + ToolOutputAvailableChunk( + tool_call_id='ws_00e767404995b9950068e647fb73c48191b0bdb147c3a0d22c', output={'status': 'completed'} + ), + ReasoningStartChunk(id='d775971d84c848228275a25a097b6409'), + ReasoningDeltaChunk(id='d775971d84c848228275a25a097b6409', delta=''), + ToolInputStartChunk( + tool_call_id='ws_00e767404995b9950068e647fee97c8191919865e0c0a78bba', tool_name='web_search' + ), + ToolInputDeltaChunk( + tool_call_id='ws_00e767404995b9950068e647fee97c8191919865e0c0a78bba', + input_text_delta='{"query":"OTEL_INSTRUMENTATION_HTTP_CAPTURE_BODY opentelemetry python","type":"search"}', + ), + ToolOutputAvailableChunk( + tool_call_id='ws_00e767404995b9950068e647fee97c8191919865e0c0a78bba', output={'status': 'completed'} + ), + ReasoningStartChunk(id='d775971d84c848228275a25a097b6409'), + ReasoningDeltaChunk(id='d775971d84c848228275a25a097b6409', delta=''), + ToolInputStartChunk( + tool_call_id='ws_00e767404995b9950068e64803f27c81918a39ce50cb8dfbc2', tool_name='web_search' + ), + ToolInputDeltaChunk( + tool_call_id='ws_00e767404995b9950068e64803f27c81918a39ce50cb8dfbc2', + input_text_delta='{"query":"site:github.com open-telemetry/opentelemetry-python-contrib OTEL_INSTRUMENTATION_HTTP_CAPTURE_BODY","type":"search"}', + ), + ToolOutputAvailableChunk( + tool_call_id='ws_00e767404995b9950068e64803f27c81918a39ce50cb8dfbc2', output={'status': 'completed'} + ), + ReasoningStartChunk(id='d775971d84c848228275a25a097b6409'), + ReasoningDeltaChunk(id='d775971d84c848228275a25a097b6409', delta=''), + ToolInputStartChunk( + tool_call_id='ws_00e767404995b9950068e6480ac0888191a7897231e6ca9911', tool_name='web_search' + ), + ToolInputDeltaChunk( + tool_call_id='ws_00e767404995b9950068e6480ac0888191a7897231e6ca9911', + input_text_delta='{"query":null,"type":"search"}', + ), + ToolOutputAvailableChunk( + tool_call_id='ws_00e767404995b9950068e6480ac0888191a7897231e6ca9911', output={'status': 'completed'} + 
), + ReasoningStartChunk(id='d775971d84c848228275a25a097b6409'), + ReasoningDeltaChunk(id='d775971d84c848228275a25a097b6409', delta=''), + ToolInputStartChunk( + tool_call_id='ws_00e767404995b9950068e6480e11208191834104e1aaab1148', tool_name='web_search' + ), + ToolInputDeltaChunk( + tool_call_id='ws_00e767404995b9950068e6480e11208191834104e1aaab1148', + input_text_delta='{"query":null,"type":"search"}', + ), + ToolOutputAvailableChunk( + tool_call_id='ws_00e767404995b9950068e6480e11208191834104e1aaab1148', output={'status': 'completed'} + ), + ReasoningStartChunk(id='d775971d84c848228275a25a097b6409'), + ReasoningDeltaChunk(id='d775971d84c848228275a25a097b6409', delta=''), + ToolInputStartChunk( + tool_call_id='ws_00e767404995b9950068e648118bf88191aa7f804637c45b32', tool_name='web_search' + ), + ToolInputDeltaChunk( + tool_call_id='ws_00e767404995b9950068e648118bf88191aa7f804637c45b32', + input_text_delta='{"query":"OTEL_PYTHON_LOG_CORRELATION environment variable","type":"search"}', + ), + ToolOutputAvailableChunk( + tool_call_id='ws_00e767404995b9950068e648118bf88191aa7f804637c45b32', output={'status': 'completed'} + ), + ReasoningStartChunk(id='d775971d84c848228275a25a097b6409'), + ReasoningDeltaChunk(id='d775971d84c848228275a25a097b6409', delta=''), + TextStartChunk(id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk( + delta="""\ +Short answer: +- Default\ +""", + id='d775971d84c848228275a25a097b6409', + ), + TextDeltaChunk(delta=' FastAPI/OpenTelemetry', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta=' instrumentation already records method', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta='/route/status', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk( + delta="""\ +. +- To also\ +""", + id='d775971d84c848228275a25a097b6409', + ), + TextDeltaChunk(delta=' include HTTP headers', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta=', set', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta=' the capture-', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta='headers env', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk( + delta="""\ + vars. +-\ +""", + id='d775971d84c848228275a25a097b6409', + ), + TextDeltaChunk(delta=' To include request', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta='/response bodies', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta=', use the', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta=' FastAPI', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta='/ASGI', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta=' request/response', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta=' hooks and add', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta=' the', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta=' payload to', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta=' the span yourself', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta=' (with red', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta='action/size', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk( + delta="""\ + limits). 
+ +How\ +""", + id='d775971d84c848228275a25a097b6409', + ), + TextDeltaChunk(delta=' to do it', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk( + delta="""\ + + +1)\ +""", + id='d775971d84c848228275a25a097b6409', + ), + TextDeltaChunk(delta=' Enable header capture', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta=' (server side', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk( + delta="""\ +) +- Choose\ +""", + id='d775971d84c848228275a25a097b6409', + ), + TextDeltaChunk(delta=' just the', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta=' headers you need; avoid', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta=' sensitive ones or sanitize', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk( + delta="""\ + them. + +export OTEL\ +""", + id='d775971d84c848228275a25a097b6409', + ), + TextDeltaChunk(delta='_INSTRUMENTATION_HTTP_CAPTURE', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta='_HEADERS_SERVER_REQUEST="content', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta='-type,user', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta='-agent"\n', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta='export OTEL_INSTRUMENTATION', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta='_HTTP_CAPTURE_HEADERS', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta='_SERVER_RESPONSE="content-type"\n', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta='export OTEL_INSTRUMENTATION_HTTP', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk( + delta="""\ +_CAPTURE_HEADERS_SANITIZE_FIELDS="authorization,set-cookie" + +This makes headers appear on spans as http.request.header.* and http.response.header.*. ([opentelemetry-python-contrib.readthedocs.io](https://opentelemetry-python-contrib.readthedocs.io/en/latest/instrumentation/fastapi/fastapi.html)) + +2)\ +""", + id='d775971d84c848228275a25a097b6409', + ), + TextDeltaChunk(delta=' Add hooks to capture request', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta='/response bodies', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk( + delta="""\ + +Note:\ +""", + id='d775971d84c848228275a25a097b6409', + ), + TextDeltaChunk(delta=IsStr(), id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta=' a built-in Python', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta=' env', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta=' var to', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta=' auto-capture', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta=' HTTP bodies for Fast', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta='API/AS', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta='GI. Use', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta=' hooks to look at', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta=' ASGI receive', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta='/send events and', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta=' attach (tr', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta='uncated) bodies', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta=' as span attributes', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk( + delta="""\ +. 
+ +from\ +""", + id='d775971d84c848228275a25a097b6409', + ), + TextDeltaChunk(delta=' fastapi import', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta=' FastAPI', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk( + delta="""\ + +from opente\ +""", + id='d775971d84c848228275a25a097b6409', + ), + TextDeltaChunk(delta='lemetry.trace', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta=' import Span', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk( + delta="""\ + +from opente\ +""", + id='d775971d84c848228275a25a097b6409', + ), + TextDeltaChunk(delta='lemetry.instrument', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta='ation.fastapi import', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta=' FastAPIInstrument', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk( + delta="""\ +or + +MAX\ +""", + id='d775971d84c848228275a25a097b6409', + ), + TextDeltaChunk(delta='_BYTES = ', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta='2048 ', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta=' # keep this', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta=' small in prod', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk( + delta="""\ + + +def client\ +""", + id='d775971d84c848228275a25a097b6409', + ), + TextDeltaChunk(delta='_request_hook(span', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta=': Span,', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta=' scope: dict', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta=', message:', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk( + delta="""\ + dict): + \ +""", + id='d775971d84c848228275a25a097b6409', + ), + TextDeltaChunk(delta=' if span and', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta=' span.is_record', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta='ing() and', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta=' message.get("', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta='type") ==', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta=' "http.request', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk( + delta="""\ +": + body\ +""", + id='d775971d84c848228275a25a097b6409', + ), + TextDeltaChunk(delta=' = message.get', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta='("body")', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta=' or b"', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk( + delta="""\ +" + if\ +""", + id='d775971d84c848228275a25a097b6409', + ), + TextDeltaChunk( + delta="""\ + body: + \ +""", + id='d775971d84c848228275a25a097b6409', + ), + TextDeltaChunk(delta=' span.set_attribute', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk( + delta="""\ +( + "\ +""", + id='d775971d84c848228275a25a097b6409', + ), + TextDeltaChunk(delta='http.request.body', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk( + delta="""\ +", + body\ +""", + id='d775971d84c848228275a25a097b6409', + ), + TextDeltaChunk(delta='[:MAX_BYTES', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta='].decode("', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta='utf-8', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta='", "replace', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk( + delta="""\ +"), + ) +""", + id='d775971d84c848228275a25a097b6409', + ), + TextDeltaChunk( + delta="""\ + +def client_response\ +""", + 
id='d775971d84c848228275a25a097b6409', + ), + TextDeltaChunk(delta='_hook(span:', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta=' Span, scope', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta=': dict,', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta=' message: dict', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk( + delta="""\ +): + if\ +""", + id='d775971d84c848228275a25a097b6409', + ), + TextDeltaChunk(delta=' span and span', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta='.is_recording', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta='() and message', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta='.get("type', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta='") == "', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta='http.response.body', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk( + delta="""\ +": + body\ +""", + id='d775971d84c848228275a25a097b6409', + ), + TextDeltaChunk(delta=' = message.get', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta='("body")', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta=' or b"', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk( + delta="""\ +" + if\ +""", + id='d775971d84c848228275a25a097b6409', + ), + TextDeltaChunk( + delta="""\ + body: + \ +""", + id='d775971d84c848228275a25a097b6409', + ), + TextDeltaChunk(delta=' span.set_attribute', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk( + delta="""\ +( + "\ +""", + id='d775971d84c848228275a25a097b6409', + ), + TextDeltaChunk(delta='http.response.body', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk( + delta="""\ +", + body\ +""", + id='d775971d84c848228275a25a097b6409', + ), + TextDeltaChunk(delta='[:MAX_BYTES', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta='].decode("', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta='utf-8', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta='", "replace', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk( + delta="""\ +"), + ) +""", + id='d775971d84c848228275a25a097b6409', + ), + TextDeltaChunk( + delta="""\ + +app = Fast\ +""", + id='d775971d84c848228275a25a097b6409', + ), + TextDeltaChunk( + delta="""\ +API() +Fast\ +""", + id='d775971d84c848228275a25a097b6409', + ), + TextDeltaChunk(delta='APIInstrumentor', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta='.instrument_app(', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk( + delta="""\ + + app,\ +""", + id='d775971d84c848228275a25a097b6409', + ), + TextDeltaChunk( + delta="""\ + + client_request\ +""", + id='d775971d84c848228275a25a097b6409', + ), + TextDeltaChunk(delta='_hook=client', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk( + delta="""\ +_request_hook, + \ +""", + id='d775971d84c848228275a25a097b6409', + ), + TextDeltaChunk(delta=' client_response_hook', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta='=client_response', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk( + delta="""\ +_hook, +) +""", + id='d775971d84c848228275a25a097b6409', + ), + TextDeltaChunk( + delta="""\ + +- The hooks\ +""", + id='d775971d84c848228275a25a097b6409', + ), + TextDeltaChunk(delta=' receive the AS', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta='GI event dict', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta='s: http', id='d775971d84c848228275a25a097b6409'), + 
TextDeltaChunk(delta='.request (with', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta=' body/more', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta='_body) and', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta=' http.response.body', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta='. If your', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta=' bodies can be', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta=' chunked,', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta=' you may need', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta=' to accumulate across', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta=' calls when message', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta='.get("more', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta='_body") is', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta=' True. ', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk( + delta='([opentelemetry-python-contrib.readthedocs.io](https://opentelemetry-python-contrib.readthedocs.io/en/latest/instrumentation/fastapi/fastapi.html)', + id='d775971d84c848228275a25a097b6409', + ), + TextDeltaChunk(delta=')', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk( + delta="""\ + + +3)\ +""", + id='d775971d84c848228275a25a097b6409', + ), + TextDeltaChunk(delta=' Be careful with', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta=' PII and', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk( + delta="""\ + size +-\ +""", + id='d775971d84c848228275a25a097b6409', + ), + TextDeltaChunk(delta=' Always limit size', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta=' and consider redaction', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta=' before putting payloads', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk( + delta="""\ + on spans. +-\ +""", + id='d775971d84c848228275a25a097b6409', + ), + TextDeltaChunk(delta=' Use the sanitize', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta=' env var above', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta=' for sensitive headers', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta='. 
', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk( + delta='([opentelemetry-python-contrib.readthedocs.io](https://opentelemetry-python-contrib.readthedocs.io/en/latest/instrumentation/fastapi/fastapi.html))\n', + id='d775971d84c848228275a25a097b6409', + ), + TextDeltaChunk( + delta="""\ + +Optional: correlate logs\ +""", + id='d775971d84c848228275a25a097b6409', + ), + TextDeltaChunk( + delta="""\ + with traces +-\ +""", + id='d775971d84c848228275a25a097b6409', + ), + TextDeltaChunk(delta=' If you also want', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta=' request/response', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta=' details in logs with', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta=' trace IDs, enable', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta=' Python log correlation:\n', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk( + delta="""\ + +export OTEL_P\ +""", + id='d775971d84c848228275a25a097b6409', + ), + TextDeltaChunk(delta='YTHON_LOG_COR', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta='RELATION=true', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk( + delta="""\ + + +or programmatically\ +""", + id='d775971d84c848228275a25a097b6409', + ), + TextDeltaChunk( + delta="""\ +: +from opente\ +""", + id='d775971d84c848228275a25a097b6409', + ), + TextDeltaChunk(delta='lemetry.instrumentation', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta='.logging import LoggingInstrument', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk( + delta="""\ +or +LoggingInstrument\ +""", + id='d775971d84c848228275a25a097b6409', + ), + TextDeltaChunk(delta='or().instrument(set', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta='_logging_format=True)\n', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk( + delta="""\ + +This injects trace\ +""", + id='d775971d84c848228275a25a097b6409', + ), + TextDeltaChunk(delta='_id/span_id into', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta=' log records so you', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta=' can line up logs', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta=' with the span that', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta=' carries the HTTP payload', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta=' attributes. 
', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk( + delta='([opentelemetry-python-contrib.readthedocs.io](https://opentelemetry-python-contrib.readthedocs.io/en/latest/instrumentation/logging/logging.html?utm_source=openai))\n', + id='d775971d84c848228275a25a097b6409', + ), + TextDeltaChunk( + delta="""\ + +Want me to tailor\ +""", + id='d775971d84c848228275a25a097b6409', + ), + TextDeltaChunk(delta=' the hook to only', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta=' capture JSON bodies,', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta=' skip binary content,', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta=' or accumulate chunked', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta=' bodies safely?', id='d775971d84c848228275a25a097b6409'), + ToolInputStartChunk( + tool_call_id='ws_00e767404995b9950068e647f909248191bfe8d05eeed67645', tool_name='web_search' + ), + ToolInputDeltaChunk( + tool_call_id='ws_00e767404995b9950068e647f909248191bfe8d05eeed67645', + input_text_delta='{"query":"OpenTelemetry FastAPI instrumentation capture request and response body","type":"search"}', + ), + ToolOutputAvailableChunk( + tool_call_id='ws_00e767404995b9950068e647f909248191bfe8d05eeed67645', output={'status': 'completed'} + ), + ToolInputStartChunk( + tool_call_id='ws_00e767404995b9950068e647fb73c48191b0bdb147c3a0d22c', tool_name='web_search' + ), + ToolInputDeltaChunk( + tool_call_id='ws_00e767404995b9950068e647fb73c48191b0bdb147c3a0d22c', + input_text_delta='{"query":"OTEL_INSTRUMENTATION_HTTP_CAPTURE_BODY Python","type":"search"}', + ), + ToolOutputAvailableChunk( + tool_call_id='ws_00e767404995b9950068e647fb73c48191b0bdb147c3a0d22c', output={'status': 'completed'} + ), + ToolInputStartChunk( + tool_call_id='ws_00e767404995b9950068e647fee97c8191919865e0c0a78bba', tool_name='web_search' + ), + ToolInputDeltaChunk( + tool_call_id='ws_00e767404995b9950068e647fee97c8191919865e0c0a78bba', + input_text_delta='{"query":"OTEL_INSTRUMENTATION_HTTP_CAPTURE_BODY opentelemetry python","type":"search"}', + ), + ToolOutputAvailableChunk( + tool_call_id='ws_00e767404995b9950068e647fee97c8191919865e0c0a78bba', output={'status': 'completed'} + ), + ToolInputStartChunk( + tool_call_id='ws_00e767404995b9950068e64803f27c81918a39ce50cb8dfbc2', tool_name='web_search' + ), + ToolInputDeltaChunk( + tool_call_id='ws_00e767404995b9950068e64803f27c81918a39ce50cb8dfbc2', + input_text_delta='{"query":"site:github.com open-telemetry/opentelemetry-python-contrib OTEL_INSTRUMENTATION_HTTP_CAPTURE_BODY","type":"search"}', + ), + ToolOutputAvailableChunk( + tool_call_id='ws_00e767404995b9950068e64803f27c81918a39ce50cb8dfbc2', output={'status': 'completed'} + ), + ToolInputStartChunk( + tool_call_id='ws_00e767404995b9950068e6480ac0888191a7897231e6ca9911', tool_name='web_search' + ), + ToolInputDeltaChunk( + tool_call_id='ws_00e767404995b9950068e6480ac0888191a7897231e6ca9911', + input_text_delta='{"query":null,"type":"search"}', + ), + ToolOutputAvailableChunk( + tool_call_id='ws_00e767404995b9950068e6480ac0888191a7897231e6ca9911', output={'status': 'completed'} + ), + ToolInputStartChunk( + tool_call_id='ws_00e767404995b9950068e6480e11208191834104e1aaab1148', tool_name='web_search' + ), + ToolInputDeltaChunk( + tool_call_id='ws_00e767404995b9950068e6480e11208191834104e1aaab1148', + input_text_delta='{"query":null,"type":"search"}', + ), + ToolOutputAvailableChunk( + tool_call_id='ws_00e767404995b9950068e6480e11208191834104e1aaab1148', output={'status': 'completed'} 
+ ), + ToolInputStartChunk( + tool_call_id='ws_00e767404995b9950068e648118bf88191aa7f804637c45b32', tool_name='web_search' + ), + ToolInputDeltaChunk( + tool_call_id='ws_00e767404995b9950068e648118bf88191aa7f804637c45b32', + input_text_delta='{"query":"OTEL_PYTHON_LOG_CORRELATION environment variable","type":"search"}', + ), + ToolOutputAvailableChunk( + tool_call_id='ws_00e767404995b9950068e648118bf88191aa7f804637c45b32', output={'status': 'completed'} + ), + FinishChunk(), + DoneChunk(), + ] + ) From f0a03d9895e93a8d57bf48de600b3267ea781fa9 Mon Sep 17 00:00:00 2001 From: Douwe Maan Date: Wed, 8 Oct 2025 16:04:20 +0000 Subject: [PATCH 04/10] Claude-assisted refactoring to unify AG-UI and Vercel AI adapters and event streams --- pydantic_ai_slim/pydantic_ai/ag_ui.py | 571 +----------------- pydantic_ai_slim/pydantic_ai/ui/__init__.py | 457 ++++++++++++++ .../pydantic_ai/ui/ag_ui/__init__.py | 12 + .../pydantic_ai/ui/ag_ui/adapter.py | 334 ++++++++++ .../pydantic_ai/ui/ag_ui/event_stream.py | 548 +++++++++++++++++ .../pydantic_ai/ui/vercel_ai/__init__.py | 94 +++ .../pydantic_ai/ui/vercel_ai/_utils.py | 26 + .../pydantic_ai/ui/vercel_ai/adapter.py | 149 +++++ .../pydantic_ai/ui/vercel_ai/event_stream.py | 235 +++++++ .../pydantic_ai/ui/vercel_ai/request_types.py | 70 +++ .../{ => ui}/vercel_ai/response_types.py | 116 ++-- .../pydantic_ai/vercel_ai/__init__.py | 0 .../pydantic_ai/vercel_ai/_utils.py | 18 - .../pydantic_ai/vercel_ai/request_types.py | 274 --------- .../pydantic_ai/vercel_ai/response_stream.py | 122 ---- .../pydantic_ai/vercel_ai/starlette.py | 85 --- tests/test_ag_ui.py | 16 +- tests/test_vercel_ai.py | 11 +- 18 files changed, 2045 insertions(+), 1093 deletions(-) create mode 100644 pydantic_ai_slim/pydantic_ai/ui/__init__.py create mode 100644 pydantic_ai_slim/pydantic_ai/ui/ag_ui/__init__.py create mode 100644 pydantic_ai_slim/pydantic_ai/ui/ag_ui/adapter.py create mode 100644 pydantic_ai_slim/pydantic_ai/ui/ag_ui/event_stream.py create mode 100644 pydantic_ai_slim/pydantic_ai/ui/vercel_ai/__init__.py create mode 100644 pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_utils.py create mode 100644 pydantic_ai_slim/pydantic_ai/ui/vercel_ai/adapter.py create mode 100644 pydantic_ai_slim/pydantic_ai/ui/vercel_ai/event_stream.py create mode 100644 pydantic_ai_slim/pydantic_ai/ui/vercel_ai/request_types.py rename pydantic_ai_slim/pydantic_ai/{ => ui}/vercel_ai/response_types.py (81%) delete mode 100644 pydantic_ai_slim/pydantic_ai/vercel_ai/__init__.py delete mode 100644 pydantic_ai_slim/pydantic_ai/vercel_ai/_utils.py delete mode 100644 pydantic_ai_slim/pydantic_ai/vercel_ai/request_types.py delete mode 100644 pydantic_ai_slim/pydantic_ai/vercel_ai/response_stream.py delete mode 100644 pydantic_ai_slim/pydantic_ai/vercel_ai/starlette.py diff --git a/pydantic_ai_slim/pydantic_ai/ag_ui.py b/pydantic_ai_slim/pydantic_ai/ag_ui.py index fe0ed77951..0ae9211bbc 100644 --- a/pydantic_ai_slim/pydantic_ai/ag_ui.py +++ b/pydantic_ai_slim/pydantic_ai/ag_ui.py @@ -7,88 +7,31 @@ from __future__ import annotations import json -import uuid -from collections.abc import AsyncIterator, Awaitable, Callable, Iterable, Mapping, Sequence -from dataclasses import Field, dataclass, field, replace +from collections.abc import AsyncIterator, Awaitable, Callable, Mapping, Sequence from http import HTTPStatus from typing import ( Any, - ClassVar, Final, Generic, - Protocol, TypeAlias, - TypeVar, - runtime_checkable, ) -from pydantic import BaseModel, ValidationError - -from . 
import _utils -from ._agent_graph import CallToolsNode, ModelRequestNode -from .agent import AbstractAgent, AgentRun, AgentRunResult -from .exceptions import UserError -from .messages import ( - BaseToolCallPart, - BuiltinToolCallPart, - BuiltinToolReturnPart, - FunctionToolResultEvent, - ModelMessage, - ModelRequest, - ModelRequestPart, - ModelResponse, - ModelResponsePart, - ModelResponseStreamEvent, - PartDeltaEvent, - PartStartEvent, - SystemPromptPart, - TextPart, - TextPartDelta, - ThinkingPart, - ThinkingPartDelta, - ToolCallPart, - ToolCallPartDelta, - ToolReturnPart, - UserPromptPart, -) +from pydantic import ValidationError + +from .agent import AbstractAgent, AgentRunResult from .models import KnownModelName, Model from .output import OutputDataT, OutputSpec from .settings import ModelSettings -from .tools import AgentDepsT, DeferredToolRequests, ToolDefinition +from .tools import AgentDepsT from .toolsets import AbstractToolset -from .toolsets.external import ExternalToolset from .usage import RunUsage, UsageLimits try: - from ag_ui.core import ( - AssistantMessage, - BaseEvent, - DeveloperMessage, - EventType, - Message, - RunAgentInput, - RunErrorEvent, - RunFinishedEvent, - RunStartedEvent, - State, - SystemMessage, - TextMessageContentEvent, - TextMessageEndEvent, - TextMessageStartEvent, - ThinkingEndEvent, - ThinkingStartEvent, - ThinkingTextMessageContentEvent, - ThinkingTextMessageEndEvent, - ThinkingTextMessageStartEvent, - Tool as AGUITool, - ToolCallArgsEvent, - ToolCallEndEvent, - ToolCallResultEvent, - ToolCallStartEvent, - ToolMessage, - UserMessage, + from .ui.ag_ui import ( + StateDeps, + StateHandler, ) - from ag_ui.encoder import EventEncoder + from .ui.ag_ui.event_stream import RunAgentInput, _RunError # type: ignore[reportPrivateUsage] except ImportError as e: # pragma: no cover raise ImportError( 'Please install the `ag-ui-protocol` package to use `Agent.to_ag_ui()` method, ' @@ -322,464 +265,26 @@ async def run_ag_ui( Yields: Streaming event chunks encoded as strings according to the accept header value. """ - encoder = EventEncoder(accept=accept) - if run_input.tools: - # AG-UI tools can't be prefixed as that would result in a mismatch between the tool names in the - # Pydantic AI events and actual AG-UI tool names, preventing the tool from being called. If any - # conflicts arise, the AG-UI tool should be renamed or a `PrefixedToolset` used for local toolsets. - toolset = _AGUIFrontendToolset[AgentDepsT](run_input.tools) - toolsets = [*toolsets, toolset] if toolsets else [toolset] - - try: - yield encoder.encode( - RunStartedEvent( - thread_id=run_input.thread_id, - run_id=run_input.run_id, - ), - ) - - if not run_input.messages: - raise _NoMessagesError - - raw_state: dict[str, Any] = run_input.state or {} - if isinstance(deps, StateHandler): - if isinstance(deps.state, BaseModel): - try: - state = type(deps.state).model_validate(raw_state) - except ValidationError as e: # pragma: no cover - raise _InvalidStateError from e - else: - state = raw_state - - deps = replace(deps, state=state) - elif raw_state: - raise UserError( - f'AG-UI state is provided but `deps` of type `{type(deps).__name__}` does not implement the `StateHandler` protocol: it needs to be a dataclass with a non-optional `state` field.' - ) - else: - # `deps` not being a `StateHandler` is OK if there is no state. 
- pass - - messages = _messages_from_ag_ui(run_input.messages) - - async with agent.iter( - user_prompt=None, - output_type=[output_type or agent.output_type, DeferredToolRequests], - message_history=messages, - model=model, - deps=deps, - model_settings=model_settings, - usage_limits=usage_limits, - usage=usage, - infer_name=infer_name, - toolsets=toolsets, - ) as run: - async for event in _agent_stream(run): - yield encoder.encode(event) - - if on_complete is not None and run.result is not None: - if _utils.is_async_callable(on_complete): - await on_complete(run.result) - else: - await _utils.run_in_executor(on_complete, run.result) - except _RunError as e: - yield encoder.encode( - RunErrorEvent(message=e.message, code=e.code), - ) - except Exception as e: - yield encoder.encode( - RunErrorEvent(message=str(e)), - ) - raise e - else: - yield encoder.encode( - RunFinishedEvent( - thread_id=run_input.thread_id, - run_id=run_input.run_id, - ), - ) - - -async def _agent_stream(run: AgentRun[AgentDepsT, Any]) -> AsyncIterator[BaseEvent]: - """Run the agent streaming responses using AG-UI protocol events. - - Args: - run: The agent run to process. - - Yields: - AG-UI Server-Sent Events (SSE). - """ - async for node in run: - stream_ctx = _RequestStreamContext() - if isinstance(node, ModelRequestNode): - async with node.stream(run.ctx) as request_stream: - async for agent_event in request_stream: - async for msg in _handle_model_request_event(stream_ctx, agent_event): - yield msg - - if stream_ctx.part_end: # pragma: no branch - yield stream_ctx.part_end - stream_ctx.part_end = None - if stream_ctx.thinking: - yield ThinkingEndEvent( - type=EventType.THINKING_END, - ) - stream_ctx.thinking = False - elif isinstance(node, CallToolsNode): - async with node.stream(run.ctx) as handle_stream: - async for event in handle_stream: - if isinstance(event, FunctionToolResultEvent): - async for msg in _handle_tool_result_event(stream_ctx, event): - yield msg - - -async def _handle_model_request_event( # noqa: C901 - stream_ctx: _RequestStreamContext, - agent_event: ModelResponseStreamEvent, -) -> AsyncIterator[BaseEvent]: - """Handle an agent event and yield AG-UI protocol events. - - Args: - stream_ctx: The request stream context to manage state. - agent_event: The agent event to process. - - Yields: - AG-UI Server-Sent Events (SSE) based on the agent event. - """ - if isinstance(agent_event, PartStartEvent): - if stream_ctx.part_end: - # End the previous part. 
- yield stream_ctx.part_end - stream_ctx.part_end = None - - part = agent_event.part - if isinstance(part, ThinkingPart): # pragma: no branch - if not stream_ctx.thinking: - yield ThinkingStartEvent( - type=EventType.THINKING_START, - ) - stream_ctx.thinking = True - - if part.content: - yield ThinkingTextMessageStartEvent( - type=EventType.THINKING_TEXT_MESSAGE_START, - ) - yield ThinkingTextMessageContentEvent( - type=EventType.THINKING_TEXT_MESSAGE_CONTENT, - delta=part.content, - ) - stream_ctx.part_end = ThinkingTextMessageEndEvent( - type=EventType.THINKING_TEXT_MESSAGE_END, - ) - else: - if stream_ctx.thinking: - yield ThinkingEndEvent( - type=EventType.THINKING_END, - ) - stream_ctx.thinking = False - - if isinstance(part, TextPart): - message_id = stream_ctx.new_message_id() - yield TextMessageStartEvent( - message_id=message_id, - ) - if part.content: # pragma: no branch - yield TextMessageContentEvent( - message_id=message_id, - delta=part.content, - ) - stream_ctx.part_end = TextMessageEndEvent( - message_id=message_id, - ) - elif isinstance(part, BaseToolCallPart): - tool_call_id = part.tool_call_id - if isinstance(part, BuiltinToolCallPart): - builtin_tool_call_id = '|'.join( - [_BUILTIN_TOOL_CALL_ID_PREFIX, part.provider_name or '', tool_call_id] - ) - stream_ctx.builtin_tool_call_ids[tool_call_id] = builtin_tool_call_id - tool_call_id = builtin_tool_call_id - - message_id = stream_ctx.message_id or stream_ctx.new_message_id() - yield ToolCallStartEvent( - tool_call_id=tool_call_id, - tool_call_name=part.tool_name, - parent_message_id=message_id, - ) - if part.args: - yield ToolCallArgsEvent( - tool_call_id=tool_call_id, - delta=part.args_as_json_str(), - ) - stream_ctx.part_end = ToolCallEndEvent( - tool_call_id=tool_call_id, - ) - elif isinstance(part, BuiltinToolReturnPart): # pragma: no branch - tool_call_id = stream_ctx.builtin_tool_call_ids[part.tool_call_id] - yield ToolCallResultEvent( - message_id=stream_ctx.new_message_id(), - type=EventType.TOOL_CALL_RESULT, - role='tool', - tool_call_id=tool_call_id, - content=part.model_response_str(), - ) - - elif isinstance(agent_event, PartDeltaEvent): - delta = agent_event.delta - if isinstance(delta, TextPartDelta): - if delta.content_delta: # pragma: no branch - yield TextMessageContentEvent( - message_id=stream_ctx.message_id, - delta=delta.content_delta, - ) - elif isinstance(delta, ToolCallPartDelta): # pragma: no branch - tool_call_id = delta.tool_call_id - assert tool_call_id, '`ToolCallPartDelta.tool_call_id` must be set' - if tool_call_id in stream_ctx.builtin_tool_call_ids: - tool_call_id = stream_ctx.builtin_tool_call_ids[tool_call_id] - yield ToolCallArgsEvent( - tool_call_id=tool_call_id, - delta=delta.args_delta if isinstance(delta.args_delta, str) else json.dumps(delta.args_delta), - ) - elif isinstance(delta, ThinkingPartDelta): # pragma: no branch - if delta.content_delta: # pragma: no branch - if not isinstance(stream_ctx.part_end, ThinkingTextMessageEndEvent): - yield ThinkingTextMessageStartEvent( - type=EventType.THINKING_TEXT_MESSAGE_START, - ) - stream_ctx.part_end = ThinkingTextMessageEndEvent( - type=EventType.THINKING_TEXT_MESSAGE_END, - ) - - yield ThinkingTextMessageContentEvent( - type=EventType.THINKING_TEXT_MESSAGE_CONTENT, - delta=delta.content_delta, - ) - - -async def _handle_tool_result_event( - stream_ctx: _RequestStreamContext, - event: FunctionToolResultEvent, -) -> AsyncIterator[BaseEvent]: - """Convert a tool call result to AG-UI events. 
- - Args: - stream_ctx: The request stream context to manage state. - event: The tool call result event to process. - - Yields: - AG-UI Server-Sent Events (SSE). - """ - result = event.result - if not isinstance(result, ToolReturnPart): - return - - yield ToolCallResultEvent( - message_id=stream_ctx.new_message_id(), - type=EventType.TOOL_CALL_RESULT, - role='tool', - tool_call_id=result.tool_call_id, - content=result.model_response_str(), - ) - - # Now check for AG-UI events returned by the tool calls. - possible_event = result.metadata or result.content - if isinstance(possible_event, BaseEvent): - yield possible_event - elif isinstance(possible_event, str | bytes): # pragma: no branch - # Avoid iterable check for strings and bytes. - pass - elif isinstance(possible_event, Iterable): # pragma: no branch - for item in possible_event: # type: ignore[reportUnknownMemberType] - if isinstance(item, BaseEvent): # pragma: no branch - yield item - - -def _messages_from_ag_ui(messages: list[Message]) -> list[ModelMessage]: - """Convert a AG-UI history to a Pydantic AI one.""" - result: list[ModelMessage] = [] - tool_calls: dict[str, str] = {} # Tool call ID to tool name mapping. - request_parts: list[ModelRequestPart] | None = None - response_parts: list[ModelResponsePart] | None = None - for msg in messages: - if isinstance(msg, UserMessage | SystemMessage | DeveloperMessage) or ( - isinstance(msg, ToolMessage) and not msg.tool_call_id.startswith(_BUILTIN_TOOL_CALL_ID_PREFIX) - ): - if request_parts is None: - request_parts = [] - result.append(ModelRequest(parts=request_parts)) - response_parts = None - - if isinstance(msg, UserMessage): - request_parts.append(UserPromptPart(content=msg.content)) - elif isinstance(msg, SystemMessage | DeveloperMessage): - request_parts.append(SystemPromptPart(content=msg.content)) - else: - tool_call_id = msg.tool_call_id - tool_name = tool_calls.get(tool_call_id) - if tool_name is None: # pragma: no cover - raise _ToolCallNotFoundError(tool_call_id=tool_call_id) - - request_parts.append( - ToolReturnPart( - tool_name=tool_name, - content=msg.content, - tool_call_id=tool_call_id, - ) - ) - - elif isinstance(msg, AssistantMessage) or ( # pragma: no branch - isinstance(msg, ToolMessage) and msg.tool_call_id.startswith(_BUILTIN_TOOL_CALL_ID_PREFIX) - ): - if response_parts is None: - response_parts = [] - result.append(ModelResponse(parts=response_parts)) - request_parts = None - - if isinstance(msg, AssistantMessage): - if msg.content: - response_parts.append(TextPart(content=msg.content)) - - if msg.tool_calls: - for tool_call in msg.tool_calls: - tool_call_id = tool_call.id - tool_name = tool_call.function.name - tool_calls[tool_call_id] = tool_name - - if tool_call_id.startswith(_BUILTIN_TOOL_CALL_ID_PREFIX): - _, provider_name, tool_call_id = tool_call_id.split('|', 2) - response_parts.append( - BuiltinToolCallPart( - tool_name=tool_name, - args=tool_call.function.arguments, - tool_call_id=tool_call_id, - provider_name=provider_name, - ) - ) - else: - response_parts.append( - ToolCallPart( - tool_name=tool_name, - tool_call_id=tool_call_id, - args=tool_call.function.arguments, - ) - ) - else: - tool_call_id = msg.tool_call_id - tool_name = tool_calls.get(tool_call_id) - if tool_name is None: # pragma: no cover - raise _ToolCallNotFoundError(tool_call_id=tool_call_id) - _, provider_name, tool_call_id = tool_call_id.split('|', 2) - - response_parts.append( - BuiltinToolReturnPart( - tool_name=tool_name, - content=msg.content, - tool_call_id=tool_call_id, - 
provider_name=provider_name, - ) - ) - - return result - - -@runtime_checkable -class StateHandler(Protocol): - """Protocol for state handlers in agent runs. Requires the class to be a dataclass with a `state` field.""" - - # Has to be a dataclass so we can use `replace` to update the state. - # From https://github.com/python/typeshed/blob/9ab7fde0a0cd24ed7a72837fcb21093b811b80d8/stdlib/_typeshed/__init__.pyi#L352 - __dataclass_fields__: ClassVar[dict[str, Field[Any]]] - - @property - def state(self) -> State: - """Get the current state of the agent run.""" - ... - - @state.setter - def state(self, state: State) -> None: - """Set the state of the agent run. - - This method is called to update the state of the agent run with the - provided state. - - Args: - state: The run state. - - Raises: - InvalidStateError: If `state` does not match the expected model. - """ - ... - - -StateT = TypeVar('StateT', bound=BaseModel) -"""Type variable for the state type, which must be a subclass of `BaseModel`.""" - - -@dataclass -class StateDeps(Generic[StateT]): - """Provides AG-UI state management. - - This class is used to manage the state of an agent run. It allows setting - the state of the agent run with a specific type of state model, which must - be a subclass of `BaseModel`. - - The state is set using the `state` setter by the `Adapter` when the run starts. - - Implements the `StateHandler` protocol. - """ - - state: StateT - - -@dataclass(repr=False) -class _RequestStreamContext: - """Data class to hold request stream context.""" - - message_id: str = '' - part_end: BaseEvent | None = None - thinking: bool = False - builtin_tool_call_ids: dict[str, str] = field(default_factory=dict) - - def new_message_id(self) -> str: - """Generate a new message ID for the request stream. - - Assigns a new UUID to the `message_id` and returns it. - - Returns: - A new message ID. 
- """ - self.message_id = str(uuid.uuid4()) - return self.message_id - - -@dataclass -class _RunError(Exception): - """Exception raised for errors during agent runs.""" - - message: str - code: str - - def __str__(self) -> str: # pragma: no cover - return self.message - - -@dataclass -class _NoMessagesError(_RunError): - """Exception raised when no messages are found in the input.""" - - message: str = 'no messages found in the input' - code: str = 'no_messages' - - -@dataclass -class _InvalidStateError(_RunError, ValidationError): - """Exception raised when an invalid state is provided.""" - - message: str = 'invalid state provided' - code: str = 'invalid_state' - - + from .ui.ag_ui import AGUIAdapter + + adapter = AGUIAdapter(agent=agent) + async for event_str in adapter.run_stream_sse( + request=run_input, + accept=accept, + output_type=output_type, + model=model, + deps=deps, + model_settings=model_settings, + usage_limits=usage_limits, + usage=usage, + infer_name=infer_name, + toolsets=toolsets, + on_complete=on_complete, + ): + yield event_str + + +# _ToolCallNotFoundError is defined here (not in ui/ag_ui) since it's specific to this module class _ToolCallNotFoundError(_RunError, ValueError): """Exception raised when an tool result is present without a matching call.""" @@ -789,21 +294,3 @@ def __init__(self, tool_call_id: str) -> None: message=f'Tool call with ID {tool_call_id} not found in the history.', code='tool_call_not_found', ) - - -class _AGUIFrontendToolset(ExternalToolset[AgentDepsT]): - def __init__(self, tools: list[AGUITool]): - super().__init__( - [ - ToolDefinition( - name=tool.name, - description=tool.description, - parameters_json_schema=tool.parameters, - ) - for tool in tools - ] - ) - - @property - def label(self) -> str: - return 'the AG-UI frontend tools' # pragma: no cover diff --git a/pydantic_ai_slim/pydantic_ai/ui/__init__.py b/pydantic_ai_slim/pydantic_ai/ui/__init__.py new file mode 100644 index 0000000000..dec4830efe --- /dev/null +++ b/pydantic_ai_slim/pydantic_ai/ui/__init__.py @@ -0,0 +1,457 @@ +"""Base classes for UI event stream protocols. + +This module provides abstract base classes for implementing UI event stream adapters +that transform Pydantic AI agent events into protocol-specific events (e.g., AG-UI, Vercel AI). +""" + +# pyright: reportIncompatibleMethodOverride=false, reportUnknownVariableType=false, reportGeneralTypeIssues=false + +from __future__ import annotations + +from abc import ABC, abstractmethod +from collections.abc import AsyncIterator +from typing import Any, Generic, Protocol, TypeVar, runtime_checkable +from uuid import uuid4 + +from ..messages import ( + AgentStreamEvent, + BuiltinToolCallEvent, # type: ignore[reportDeprecated] + BuiltinToolCallPart, + BuiltinToolResultEvent, # type: ignore[reportDeprecated] + BuiltinToolReturnPart, + FilePart, + FinalResultEvent, + FunctionToolCallEvent, + FunctionToolResultEvent, + ModelMessage, + PartDeltaEvent, + PartStartEvent, + TextPart, + TextPartDelta, + ThinkingPart, + ThinkingPartDelta, + ToolCallPart, + ToolCallPartDelta, +) +from ..run import AgentRunResultEvent +from ..tools import AgentDepsT + +__all__ = [ + 'SSEEvent', + 'BaseEventStream', + 'BaseAdapter', +] + +EventT = TypeVar('EventT', bound='SSEEvent') +"""Type variable for protocol-specific event types.""" + + +@runtime_checkable +class SSEEvent(Protocol): + """Protocol for events that can be encoded as Server-Sent Events (SSE).""" + + def sse(self) -> str: + """Encode event as a Server-Sent Event string. 
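+
+        Implementations typically serialize the event payload as JSON and wrap it in an SSE `data:` frame.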
+ + Returns: + The SSE-formatted string representation of the event. + """ + ... + + +class BaseEventStream(ABC, Generic[EventT, AgentDepsT]): + """Base class for transforming pAI agent events into protocol-specific events. + + This class provides a granular method-per-part-type pattern that makes it easy to customize + specific event transformations by overriding individual handler methods. + + Subclasses should: + 1. Initialize state attributes in `__init__` if needed + 2. Implement all abstract `handle_*` methods for event transformation + 3. Implement error handling methods + 4. Optionally override lifecycle hooks (`before_stream`, `after_stream`) + + Example: + ```python + class MyEventStream(BaseEventStream[MyEvent, MyDeps]): + def __init__(self): + super().__init__() + self.custom_state = {} + + async def handle_text_start(self, part: TextPart): + yield MyTextStartEvent(id=self.new_message_id(), text=part.content) + ``` + """ + + def __init__(self) -> None: + """Initialize event stream state. + + Subclasses can add additional state attributes for tracking streaming context. + """ + self.message_id: str = '' + + def new_message_id(self) -> str: + """Generate and store a new message ID. + + Returns: + A new UUID-based message ID. + """ + self.message_id = str(uuid4()) + return self.message_id + + async def agent_event_to_events(self, event: AgentStreamEvent | AgentRunResultEvent) -> AsyncIterator[EventT]: # noqa: C901 + """Transform a pAI agent event into protocol-specific events. + + This method dispatches to specific `handle_*` methods based on event and part type. + Subclasses should implement the individual handler methods rather than overriding this. + + Args: + event: The pAI agent event to transform. + + Yields: + Protocol-specific events. + """ + match event: + case PartStartEvent(part=part): + # Dispatch based on part type + match part: + case TextPart(): + async for e in self.handle_text_start(part): + yield e + case ThinkingPart(): + async for e in self.handle_thinking_start(part): + yield e + case ToolCallPart() | BuiltinToolCallPart(): + async for e in self.handle_tool_call_start(part): + yield e + case BuiltinToolReturnPart(): + async for e in self.handle_builtin_tool_return(part): + yield e + case FilePart(): + # FilePart is not currently handled by UI protocols + pass + case PartDeltaEvent(delta=delta): + # Dispatch based on delta type + match delta: + case TextPartDelta(): + async for e in self.handle_text_delta(delta): + yield e + case ThinkingPartDelta(): + async for e in self.handle_thinking_delta(delta): + yield e + case ToolCallPartDelta(): + async for e in self.handle_tool_call_delta(delta): + yield e + case FunctionToolCallEvent(): + async for e in self.handle_function_tool_call(event): + yield e + case FunctionToolResultEvent(): + async for e in self.handle_function_tool_result(event): + yield e + case BuiltinToolCallEvent(): # type: ignore[reportDeprecated] + async for e in self.handle_builtin_tool_call(event): + yield e + case BuiltinToolResultEvent(): # type: ignore[reportDeprecated] + async for e in self.handle_builtin_tool_result(event): + yield e + case FinalResultEvent(): + async for e in self.handle_final_result(event): + yield e + case AgentRunResultEvent(): + async for e in self.handle_run_result(event): + yield e + + # Granular part handlers (abstract - must implement) + + @abstractmethod + async def handle_text_start(self, part: TextPart) -> AsyncIterator[EventT]: + """Handle a TextPart at start. + + Args: + part: The TextPart. 
+ + Yields: + Protocol-specific events. + """ + + @abstractmethod + async def handle_text_delta(self, delta: TextPartDelta) -> AsyncIterator[EventT]: + """Handle a TextPartDelta. + + Args: + delta: The TextPartDelta. + + Yields: + Protocol-specific events. + """ + + @abstractmethod + async def handle_thinking_start(self, part: ThinkingPart) -> AsyncIterator[EventT]: + """Handle a ThinkingPart at start. + + Args: + part: The ThinkingPart. + + Yields: + Protocol-specific events. + """ + + @abstractmethod + async def handle_thinking_delta(self, delta: ThinkingPartDelta) -> AsyncIterator[EventT]: + """Handle a ThinkingPartDelta. + + Args: + delta: The ThinkingPartDelta. + + Yields: + Protocol-specific events. + """ + + @abstractmethod + async def handle_tool_call_start(self, part: ToolCallPart | BuiltinToolCallPart) -> AsyncIterator[EventT]: + """Handle a ToolCallPart or BuiltinToolCallPart at start. + + Args: + part: The tool call part. + + Yields: + Protocol-specific events. + """ + + @abstractmethod + async def handle_tool_call_delta(self, delta: ToolCallPartDelta) -> AsyncIterator[EventT]: + """Handle a ToolCallPartDelta. + + Args: + delta: The ToolCallPartDelta. + + Yields: + Protocol-specific events. + """ + + @abstractmethod + async def handle_builtin_tool_return(self, part: BuiltinToolReturnPart) -> AsyncIterator[EventT]: + """Handle a BuiltinToolReturnPart. + + Args: + part: The BuiltinToolReturnPart. + + Yields: + Protocol-specific events. + """ + + # Tool event handlers (abstract - must implement) + + @abstractmethod + async def handle_function_tool_call(self, event: FunctionToolCallEvent) -> AsyncIterator[EventT]: + """Handle a FunctionToolCallEvent. + + Args: + event: The function tool call event. + + Yields: + Protocol-specific events. + """ + + @abstractmethod + async def handle_function_tool_result(self, event: FunctionToolResultEvent) -> AsyncIterator[EventT]: + """Handle a FunctionToolResultEvent. + + Args: + event: The function tool result event. + + Yields: + Protocol-specific events. + """ + + @abstractmethod + async def handle_builtin_tool_call(self, event: BuiltinToolCallEvent) -> AsyncIterator[EventT]: # type: ignore[reportDeprecated] + """Handle a BuiltinToolCallEvent. + + Args: + event: The builtin tool call event. + + Yields: + Protocol-specific events. + """ + + @abstractmethod + async def handle_builtin_tool_result(self, event: BuiltinToolResultEvent) -> AsyncIterator[EventT]: # type: ignore[reportDeprecated] + """Handle a BuiltinToolResultEvent. + + Args: + event: The builtin tool result event. + + Yields: + Protocol-specific events. + """ + + @abstractmethod + async def handle_final_result(self, event: FinalResultEvent) -> AsyncIterator[EventT]: + """Handle a FinalResultEvent. + + Args: + event: The final result event. + + Yields: + Protocol-specific events. + """ + + @abstractmethod + async def handle_run_result(self, event: AgentRunResultEvent) -> AsyncIterator[EventT]: + """Handle an AgentRunResultEvent (final event with result/usage). + + Args: + event: The agent run result event. + + Yields: + Protocol-specific events. + """ + + # Lifecycle hooks (optional overrides) + + async def before_stream(self) -> AsyncIterator[EventT]: + """Yield events before agent streaming starts. + + This hook is called before any agent events are processed. + Override this to inject custom events at the start of the stream. + + Yields: + Protocol-specific events to emit before streaming. 
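+
+        Example (a sketch, assuming a protocol-specific `StartChunk` event type):
+
+        ```python
+        async def before_stream(self):
+            yield StartChunk()
+        ```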
+ """ + return + yield # Make this an async generator + + async def after_stream(self) -> AsyncIterator[EventT]: + """Yield events after agent streaming completes. + + This hook is called after all agent events have been processed. + Override this to inject custom events at the end of the stream. + + Yields: + Protocol-specific events to emit after streaming. + """ + return + yield # Make this an async generator + + # Error handling (must implement) + + @abstractmethod + async def on_validation_error(self, error: Exception) -> AsyncIterator[EventT]: + """Handle validation errors that occur before streaming starts. + + Args: + error: The validation error that occurred. + + Yields: + Protocol-specific error events. + """ + + @abstractmethod + async def on_stream_error(self, error: Exception) -> AsyncIterator[EventT]: + """Handle errors that occur during streaming (after stream has started). + + Args: + error: The error that occurred during streaming. + + Yields: + Protocol-specific error events. + """ + + +RequestT = TypeVar('RequestT') +"""Type variable for protocol-specific request types.""" + +MessageT = TypeVar('MessageT') +"""Type variable for protocol-specific message types.""" + + +class BaseAdapter(ABC, Generic[RequestT, MessageT, EventT, AgentDepsT]): + """Base adapter for handling UI protocol requests and streaming responses. + + This class provides a unified interface for request/response handling across different + UI protocols (AG-UI, Vercel AI, etc.). It handles: + - Request parsing and validation + - Message format conversion (protocol messages → pAI messages) + - Agent execution and event streaming + - Error handling (validation errors vs streaming errors) + - SSE encoding + + Type Parameters: + RequestT: Protocol-specific request type (e.g., RunAgentInput, RequestData) + MessageT: Protocol-specific message type (e.g., ag_ui.Message, UIMessage) + EventT: Protocol-specific event type (e.g., ag_ui.BaseEvent, AbstractSSEChunk) + AgentDepsT: Agent dependencies type + + Example: + ```python + class MyAdapter(BaseAdapter[MyRequest, MyMessage, MyEvent, MyDeps]): + def create_event_stream(self) -> BaseEventStream[MyEvent, MyDeps]: + return MyEventStream() + + def parse_request_messages(self, request: MyRequest) -> list[MyMessage]: + return request.messages + + def protocol_messages_to_pai_messages(self, messages: list[MyMessage]) -> list[ModelMessage]: + # Convert protocol messages to pAI messages + ... + ``` + """ + + @abstractmethod + def create_event_stream(self) -> BaseEventStream[EventT, AgentDepsT]: + """Create a new event stream for this protocol. + + Returns: + A protocol-specific event stream instance. + """ + + @abstractmethod + def parse_request_messages(self, request: RequestT) -> list[MessageT]: + """Extract messages from the protocol request. + + Args: + request: The protocol-specific request. + + Returns: + List of protocol-specific messages. + """ + + @abstractmethod + def protocol_messages_to_pai_messages(self, messages: list[MessageT]) -> list[ModelMessage]: + """Convert protocol messages to Pydantic AI messages. + + Args: + messages: List of protocol-specific messages. + + Returns: + List of Pydantic AI ModelMessage objects. + """ + + @abstractmethod + def encode_event(self, event: EventT) -> str: + """Encode a protocol event as an SSE string. + + Args: + event: The protocol-specific event. + + Returns: + SSE-formatted string. 
+ """ + + @abstractmethod + async def dispatch_request(self, request: Any, deps: AgentDepsT | None = None) -> Any: + """Handle a request and return a response. + + This method should handle the full request/response cycle: + - Parse and validate the request + - Run the agent with the request data + - Return an appropriate response (e.g., StreamingResponse, EventSourceResponse) + + Args: + request: The protocol-specific request object (e.g., Starlette Request). + deps: Optional dependencies to pass to the agent. + + Returns: + A protocol-specific response object. + """ diff --git a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/__init__.py b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/__init__.py new file mode 100644 index 0000000000..620b74cd28 --- /dev/null +++ b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/__init__.py @@ -0,0 +1,12 @@ +"""AG-UI protocol integration for Pydantic AI agents.""" + +from .adapter import AGUIAdapter +from .event_stream import AGUIEventStream, StateDeps, StateHandler, protocol_messages_to_pai_messages + +__all__ = [ + 'AGUIAdapter', + 'AGUIEventStream', + 'StateHandler', + 'StateDeps', + 'protocol_messages_to_pai_messages', +] diff --git a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/adapter.py b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/adapter.py new file mode 100644 index 0000000000..e6ffa784f3 --- /dev/null +++ b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/adapter.py @@ -0,0 +1,334 @@ +"""AG-UI adapter for handling requests.""" + +# pyright: reportGeneralTypeIssues=false, reportInvalidTypeArguments=false + +from __future__ import annotations + +import json +from dataclasses import dataclass +from http import HTTPStatus +from typing import TYPE_CHECKING, Any + +from pydantic import BaseModel, ValidationError + +from ...tools import AgentDepsT +from .event_stream import ( + AGUIEventStream, + RunAgentInput, + StateHandler, + _AGUIFrontendToolset, # type: ignore[reportPrivateUsage] + _InvalidStateError, # type: ignore[reportPrivateUsage] + _NoMessagesError, # type: ignore[reportPrivateUsage] + _RunError, # type: ignore[reportPrivateUsage] + protocol_messages_to_pai_messages, +) + +if TYPE_CHECKING: + from ...agent import Agent + +__all__ = ['AGUIAdapter'] + + +@dataclass +class AGUIAdapter: + """Adapter for handling AG-UI protocol requests with Pydantic AI agents. + + This adapter provides an interface for integrating Pydantic AI agents + with the AG-UI protocol, handling request parsing, message conversion, + and event streaming. + + Example: + ```python + from pydantic_ai import Agent + from pydantic_ai.ui.ag_ui import AGUIAdapter + + agent = Agent('openai:gpt-4') + adapter = AGUIAdapter(agent) + + async def handle_request(request: RunAgentInput, deps=None): + async for event_str in adapter.run_stream_sse(request, deps): + yield event_str + ``` + """ + + agent: Agent[AgentDepsT] + """The Pydantic AI agent to run.""" + + async def run_stream( # noqa: C901 + self, + request: RunAgentInput, + deps: AgentDepsT | None = None, + *, + output_type: Any = None, + model: Any = None, + model_settings: Any = None, + usage_limits: Any = None, + usage: Any = None, + infer_name: bool = True, + toolsets: Any = None, + on_complete: Any = None, + ): + """Stream events from an agent run as AG-UI protocol events. + + This method provides a complete implementation with all AG-UI features including: + - Frontend tools handling + - State injection + - Error handling (validation vs stream errors) + - on_complete callback + - RunStarted and RunFinished events + + Args: + request: The AG-UI request data. 
+ deps: Optional dependencies to pass to the agent. + output_type: Custom output type for this run. + model: Optional model to use for this run. + model_settings: Optional settings for the model's request. + usage_limits: Optional limits on model request count or token usage. + usage: Optional usage to start with. + infer_name: Whether to infer the agent name from the call frame. + toolsets: Optional additional toolsets for this run. + on_complete: Optional callback called when the agent run completes. + + Yields: + AG-UI protocol events (BaseEvent subclasses). + + Raises: + _RunError: If request validation fails or other errors occur. + """ + from ... import _utils + from ...exceptions import UserError + from ...tools import DeferredToolRequests + from .event_stream import RunFinishedEvent, RunStartedEvent + + # Create event stream + event_stream = self.create_event_stream() + stream_started = False + + # Handle frontend tools + if request.tools: + toolset = _AGUIFrontendToolset[AgentDepsT](request.tools) + toolsets = [*toolsets, toolset] if toolsets else [toolset] + + try: + # Emit start event + yield RunStartedEvent( + thread_id=request.thread_id, + run_id=request.run_id, + ) + stream_started = True + + if not request.messages: + raise _NoMessagesError + + # Handle state injection + raw_state: dict[str, Any] = request.state or {} + if isinstance(deps, StateHandler): + if isinstance(deps.state, BaseModel): + try: + state = type(deps.state).model_validate(raw_state) + except ValidationError as e: # pragma: no cover + raise _InvalidStateError from e + else: + state = raw_state + + from dataclasses import replace + + deps = replace(deps, state=state) + elif raw_state: + raise UserError( + f'AG-UI state is provided but `deps` of type `{type(deps).__name__}` does not implement the `StateHandler` protocol: it needs to be a dataclass with a non-optional `state` field.' 
+ ) + + # Convert AG-UI messages to pAI messages + messages = protocol_messages_to_pai_messages(request.messages) + + # Run agent and stream events + result = None + async for event in self.agent.run_stream_events( + user_prompt=None, + output_type=[output_type or self.agent.output_type, DeferredToolRequests], + message_history=messages, + model=model, + deps=deps, + model_settings=model_settings, + usage_limits=usage_limits, + usage=usage, + infer_name=infer_name, + toolsets=toolsets, + ): + from ...run import AgentRunResultEvent + + # Capture result for on_complete callback + if isinstance(event, AgentRunResultEvent): + result = event.result + + # Transform pAI events to AG-UI events + async for ag_ui_event in event_stream.agent_event_to_events(event): # type: ignore[arg-type] + yield ag_ui_event + + # Call on_complete callback + if on_complete is not None and result is not None: + if _utils.is_async_callable(on_complete): + await on_complete(result) + else: + await _utils.run_in_executor(on_complete, result) + + except _RunError as e: + if stream_started: + async for error_event in event_stream.on_stream_error(e): + yield error_event + else: + async for error_event in event_stream.on_validation_error(e): + yield error_event + raise + except Exception as e: + if stream_started: + async for error_event in event_stream.on_stream_error(e): + yield error_event + else: + async for error_event in event_stream.on_validation_error(e): + yield error_event + raise + else: + # Emit finish event + yield RunFinishedEvent( + thread_id=request.thread_id, + run_id=request.run_id, + ) + + async def run_stream_sse( + self, + request: RunAgentInput, + accept: str, + *, + output_type: Any = None, + model: Any = None, + deps: AgentDepsT | None = None, + model_settings: Any = None, + usage_limits: Any = None, + usage: Any = None, + infer_name: bool = True, + toolsets: Any = None, + on_complete: Any = None, + ): + """Stream SSE-encoded events from an agent run. + + This method wraps `run_stream` and encodes the events as SSE strings. + + Args: + request: The AG-UI request data. + accept: The accept header value for encoding. + output_type: Custom output type for this run. + model: Optional model to use for this run. + deps: Optional dependencies to pass to the agent. + model_settings: Optional settings for the model's request. + usage_limits: Optional limits on model request count or token usage. + usage: Optional usage to start with. + infer_name: Whether to infer the agent name from the call frame. + toolsets: Optional additional toolsets for this run. + on_complete: Optional callback called when the agent run completes. + + Yields: + SSE-formatted strings. 
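+
+        Example (a sketch; `input_data` is a validated `RunAgentInput`):
+
+        ```python
+        async for sse_str in adapter.run_stream_sse(input_data, accept='text/event-stream'):
+            ...  # write each SSE string to the HTTP response
+        ```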
+ """ + from ag_ui.encoder import EventEncoder + + encoder = EventEncoder(accept=accept) + + try: + async for event in self.run_stream( + request=request, + deps=deps, + output_type=output_type, + model=model, + model_settings=model_settings, + usage_limits=usage_limits, + usage=usage, + infer_name=infer_name, + toolsets=toolsets, + on_complete=on_complete, + ): + yield encoder.encode(event) + except _RunError: + # Error events are already yielded by run_stream + # This shouldn't actually be reached since run_stream yields error events before raising + pass + except Exception: + # Let other exceptions propagate + raise + + async def dispatch_request( + self, + request: Any, + deps: AgentDepsT | None = None, + *, + output_type: Any = None, + model: Any = None, + model_settings: Any = None, + usage_limits: Any = None, + usage: Any = None, + infer_name: bool = True, + toolsets: Any = None, + on_complete: Any = None, + ) -> Any: + """Handle an AG-UI request and return a streaming response. + + Args: + request: The incoming Starlette/FastAPI request. + deps: Optional dependencies to pass to the agent. + output_type: Custom output type for this run. + model: Optional model to use for this run. + model_settings: Optional settings for the model's request. + usage_limits: Optional limits on model request count or token usage. + usage: Optional usage to start with. + infer_name: Whether to infer the agent name from the call frame. + toolsets: Optional additional toolsets for this run. + on_complete: Optional callback called when the agent run completes. + + Returns: + A streaming Starlette response with AG-UI protocol events. + """ + try: + from starlette.requests import Request + from starlette.responses import Response, StreamingResponse + except ImportError as e: # pragma: no cover + raise ImportError('Please install starlette to use dispatch_request') from e + + if not isinstance(request, Request): # pragma: no cover + raise TypeError(f'Expected Starlette Request, got {type(request).__name__}') + + accept = request.headers.get('accept', 'text/event-stream') + + try: + input_data = RunAgentInput.model_validate(await request.json()) + except ValidationError as e: # pragma: no cover + return Response( + content=json.dumps(e.json()), + media_type='application/json', + status_code=HTTPStatus.UNPROCESSABLE_ENTITY, + ) + + return StreamingResponse( + self.run_stream_sse( + request=input_data, + accept=accept, + deps=deps, + output_type=output_type, + model=model, + model_settings=model_settings, + usage_limits=usage_limits, + usage=usage, + infer_name=infer_name, + toolsets=toolsets, + on_complete=on_complete, + ), + media_type=accept, + ) + + def create_event_stream(self) -> AGUIEventStream[AgentDepsT]: + """Create a new AG-UI event stream. + + Returns: + An AGUIEventStream instance. + """ + return AGUIEventStream[AgentDepsT]() diff --git a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/event_stream.py b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/event_stream.py new file mode 100644 index 0000000000..5ce69abfda --- /dev/null +++ b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/event_stream.py @@ -0,0 +1,548 @@ +"""AG-UI protocol adapter for Pydantic AI agents. + +This module provides classes for integrating Pydantic AI agents with the AG-UI protocol, +enabling streaming event-based communication for interactive AI applications. 
+""" + +# pyright: reportIncompatibleMethodOverride=false, reportUnusedClass=false, reportGeneralTypeIssues=false, reportInvalidTypeArguments=false + +from __future__ import annotations + +import json +from collections.abc import AsyncIterator, Iterable +from dataclasses import Field, dataclass +from typing import TYPE_CHECKING, Any, ClassVar, Final, Generic, Protocol, TypeVar, runtime_checkable + +if TYPE_CHECKING: + pass # Agent type is not actually used in this module + +from pydantic import BaseModel, ValidationError + +from ...messages import ( + BuiltinToolCallEvent, # type: ignore[reportDeprecated] + BuiltinToolCallPart, + BuiltinToolResultEvent, # type: ignore[reportDeprecated] + BuiltinToolReturnPart, + FinalResultEvent, + FunctionToolCallEvent, + FunctionToolResultEvent, + ModelMessage, + TextPart, + TextPartDelta, + ThinkingPart, + ThinkingPartDelta, + ToolCallPart, + ToolCallPartDelta, + ToolReturnPart, +) +from ...run import AgentRunResultEvent +from ...tools import AgentDepsT, ToolDefinition +from ...toolsets.external import ExternalToolset +from .. import BaseEventStream + +try: + from ag_ui.core import ( + AssistantMessage, + BaseEvent, + DeveloperMessage, + EventType, + Message, + RunAgentInput, + RunErrorEvent, + RunFinishedEvent, + RunStartedEvent, + SystemMessage, + TextMessageContentEvent, + TextMessageEndEvent, + TextMessageStartEvent, + ThinkingEndEvent, + ThinkingStartEvent, + ThinkingTextMessageContentEvent, + ThinkingTextMessageEndEvent, + ThinkingTextMessageStartEvent, + Tool as AGUITool, + ToolCallArgsEvent, + ToolCallEndEvent, + ToolCallResultEvent, + ToolCallStartEvent, + ToolMessage, + UserMessage, + ) +except ImportError as e: # pragma: no cover + raise ImportError( + 'Please install the `ag-ui-protocol` package to use AG-UI integration, ' + 'you can use the `ag-ui` optional group — `pip install "pydantic-ai-slim[ag-ui]"`' + ) from e + +__all__ = [ + 'AGUIEventStream', + 'StateHandler', + 'StateDeps', + 'protocol_messages_to_pai_messages', + '_AGUIFrontendToolset', + '_NoMessagesError', + '_InvalidStateError', + '_RunError', + 'RunAgentInput', + 'RunStartedEvent', + 'RunFinishedEvent', +] + +_BUILTIN_TOOL_CALL_ID_PREFIX: Final[str] = 'pyd_ai_builtin' + + +# State management types + +StateT = TypeVar('StateT', bound=BaseModel) +"""Type variable for the state type, which must be a subclass of `BaseModel`.""" + + +@runtime_checkable +class StateHandler(Protocol): + """Protocol for state handlers in agent runs. Requires the class to be a dataclass with a `state` field.""" + + # Has to be a dataclass so we can use `replace` to update the state. + # From https://github.com/python/typeshed/blob/9ab7fde0a0cd24ed7a72837fcb21093b811b80d8/stdlib/_typeshed/__init__.pyi#L352 + __dataclass_fields__: ClassVar[dict[str, Field[Any]]] + + @property + def state(self) -> Any: + """Get the current state of the agent run.""" + ... + + @state.setter + def state(self, state: Any) -> None: + """Set the state of the agent run. + + This method is called to update the state of the agent run with the + provided state. + + Args: + state: The run state. + + Raises: + InvalidStateError: If `state` does not match the expected model. + """ + ... + + +@dataclass +class StateDeps(Generic[StateT]): + """Provides AG-UI state management. + + This class is used to manage the state of an agent run. It allows setting + the state of the agent run with a specific type of state model, which must + be a subclass of `BaseModel`. 
+
+    The state is set using the `state` setter by the `Adapter` when the run starts.
+
+    Implements the `StateHandler` protocol.
+    """
+
+    state: StateT
+
+
+# Error types
+
+
+@dataclass
+class _RunError(Exception):
+    """Exception raised for errors during agent runs."""
+
+    message: str
+    code: str
+
+    def __str__(self) -> str:  # pragma: no cover
+        return self.message
+
+
+@dataclass
+class _NoMessagesError(_RunError):
+    """Exception raised when no messages are found in the input."""
+
+    message: str = 'no messages found in the input'
+    code: str = 'no_messages'
+
+
+@dataclass
+class _InvalidStateError(_RunError, ValidationError):
+    """Exception raised when an invalid state is provided."""
+
+    message: str = 'invalid state provided'
+    code: str = 'invalid_state'
+
+
+class _ToolCallNotFoundError(_RunError, ValueError):
+    """Exception raised when a tool result is present without a matching call."""
+
+    def __init__(self, tool_call_id: str) -> None:
+        """Initialize the exception with the tool call ID."""
+        super().__init__(  # pragma: no cover
+            message=f'Tool call with ID {tool_call_id} not found in the history.',
+            code='tool_call_not_found',
+        )
+
+
+# Frontend toolset
+
+
+class _AGUIFrontendToolset(ExternalToolset[AgentDepsT]):
+    """Toolset for AG-UI frontend tools."""
+
+    def __init__(self, tools: list[AGUITool]):
+        """Initialize the toolset with AG-UI tools.
+
+        Args:
+            tools: List of AG-UI tool definitions.
+        """
+        super().__init__(
+            [
+                ToolDefinition(
+                    name=tool.name,
+                    description=tool.description,
+                    parameters_json_schema=tool.parameters,
+                )
+                for tool in tools
+            ]
+        )
+
+    @property
+    def label(self) -> str:
+        """Return the label for this toolset."""
+        return 'the AG-UI frontend tools'  # pragma: no cover
+
+
+class AGUIEventStream(BaseEventStream[BaseEvent, AgentDepsT]):
+    """Transforms Pydantic AI agent events into AG-UI protocol events.
+
+    This class handles the stateful transformation of streaming agent events
+    into the AG-UI protocol format, managing message IDs, thinking mode state,
+    and tool call ID mappings for builtin tools.
+ + Example: + ```python + event_stream = AGUIEventStream() + async for ag_ui_event in event_stream.agent_event_to_events(pai_event): + print(ag_ui_event) + ``` + """ + + def __init__(self) -> None: + """Initialize AG-UI event stream state.""" + super().__init__() + self.part_end: BaseEvent | None = None + self.thinking: bool = False + self.builtin_tool_call_ids: dict[str, str] = {} + + # Granular handlers implementation + + async def handle_text_start(self, part: TextPart) -> AsyncIterator[BaseEvent]: + """Handle a TextPart at start.""" + if self.part_end: + yield self.part_end + self.part_end = None + + if self.thinking: + yield ThinkingEndEvent(type=EventType.THINKING_END) + self.thinking = False + + message_id = self.new_message_id() + yield TextMessageStartEvent(message_id=message_id) + if part.content: # pragma: no branch + yield TextMessageContentEvent(message_id=message_id, delta=part.content) + self.part_end = TextMessageEndEvent(message_id=message_id) + + async def handle_text_delta(self, delta: TextPartDelta) -> AsyncIterator[BaseEvent]: + """Handle a TextPartDelta.""" + if delta.content_delta: # pragma: no branch + yield TextMessageContentEvent(message_id=self.message_id, delta=delta.content_delta) + + async def handle_thinking_start(self, part: ThinkingPart) -> AsyncIterator[BaseEvent]: + """Handle a ThinkingPart at start.""" + if self.part_end: + yield self.part_end + self.part_end = None + + if not self.thinking: + yield ThinkingStartEvent(type=EventType.THINKING_START) + self.thinking = True + + if part.content: + yield ThinkingTextMessageStartEvent(type=EventType.THINKING_TEXT_MESSAGE_START) + yield ThinkingTextMessageContentEvent(type=EventType.THINKING_TEXT_MESSAGE_CONTENT, delta=part.content) + self.part_end = ThinkingTextMessageEndEvent(type=EventType.THINKING_TEXT_MESSAGE_END) + + async def handle_thinking_delta(self, delta: ThinkingPartDelta) -> AsyncIterator[BaseEvent]: + """Handle a ThinkingPartDelta.""" + if delta.content_delta: # pragma: no branch + if not isinstance(self.part_end, ThinkingTextMessageEndEvent): + yield ThinkingTextMessageStartEvent(type=EventType.THINKING_TEXT_MESSAGE_START) + self.part_end = ThinkingTextMessageEndEvent(type=EventType.THINKING_TEXT_MESSAGE_END) + + yield ThinkingTextMessageContentEvent( + type=EventType.THINKING_TEXT_MESSAGE_CONTENT, delta=delta.content_delta + ) + + async def handle_tool_call_start(self, part: ToolCallPart | BuiltinToolCallPart) -> AsyncIterator[BaseEvent]: + """Handle a ToolCallPart or BuiltinToolCallPart at start.""" + if self.part_end: + yield self.part_end + self.part_end = None + + if self.thinking: + yield ThinkingEndEvent(type=EventType.THINKING_END) + self.thinking = False + + tool_call_id = part.tool_call_id + if isinstance(part, BuiltinToolCallPart): + builtin_tool_call_id = '|'.join([_BUILTIN_TOOL_CALL_ID_PREFIX, part.provider_name or '', tool_call_id]) + self.builtin_tool_call_ids[tool_call_id] = builtin_tool_call_id + tool_call_id = builtin_tool_call_id + + message_id = self.message_id or self.new_message_id() + yield ToolCallStartEvent(tool_call_id=tool_call_id, tool_call_name=part.tool_name, parent_message_id=message_id) + if part.args: + yield ToolCallArgsEvent(tool_call_id=tool_call_id, delta=part.args_as_json_str()) + self.part_end = ToolCallEndEvent(tool_call_id=tool_call_id) + + async def handle_tool_call_delta(self, delta: ToolCallPartDelta) -> AsyncIterator[BaseEvent]: + """Handle a ToolCallPartDelta.""" + tool_call_id = delta.tool_call_id + assert tool_call_id, 
'`ToolCallPartDelta.tool_call_id` must be set' + if tool_call_id in self.builtin_tool_call_ids: + tool_call_id = self.builtin_tool_call_ids[tool_call_id] + yield ToolCallArgsEvent( + tool_call_id=tool_call_id, + delta=delta.args_delta if isinstance(delta.args_delta, str) else json.dumps(delta.args_delta), + ) + + async def handle_builtin_tool_return(self, part: BuiltinToolReturnPart) -> AsyncIterator[BaseEvent]: + """Handle a BuiltinToolReturnPart.""" + # Emit any pending part_end event (e.g., TOOL_CALL_END) before the result + if self.part_end: + yield self.part_end + self.part_end = None + + tool_call_id = self.builtin_tool_call_ids[part.tool_call_id] + yield ToolCallResultEvent( + message_id=self.new_message_id(), + type=EventType.TOOL_CALL_RESULT, + role='tool', + tool_call_id=tool_call_id, + content=part.model_response_str(), + ) + + async def handle_function_tool_call(self, event: FunctionToolCallEvent) -> AsyncIterator[BaseEvent]: + """Handle a FunctionToolCallEvent. + + This event is emitted when a function tool is called, but no AG-UI events + are needed at this stage since tool calls are handled in PartStartEvent. + """ + return + yield # Make this an async generator + + async def handle_function_tool_result(self, event: FunctionToolResultEvent) -> AsyncIterator[BaseEvent]: + """Handle a FunctionToolResultEvent, emitting tool result events.""" + result = event.result + if not isinstance(result, ToolReturnPart): + return + + # Emit any pending part_end event (e.g., TOOL_CALL_END) before the result + if self.part_end: + yield self.part_end + self.part_end = None + + yield ToolCallResultEvent( + message_id=self.new_message_id(), + type=EventType.TOOL_CALL_RESULT, + role='tool', + tool_call_id=result.tool_call_id, + content=result.model_response_str(), + ) + + # Check for AG-UI events returned by tool calls. + possible_event = result.metadata or result.content + if isinstance(possible_event, BaseEvent): + yield possible_event + elif isinstance(possible_event, str | bytes): # pragma: no branch + # Avoid iterable check for strings and bytes. + pass + elif isinstance(possible_event, Iterable): # pragma: no branch + for item in possible_event: # type: ignore[reportUnknownMemberType] + if isinstance(item, BaseEvent): # pragma: no branch + yield item + + async def handle_builtin_tool_call(self, event: BuiltinToolCallEvent) -> AsyncIterator[BaseEvent]: # type: ignore[reportDeprecated] + """Handle a BuiltinToolCallEvent. + + This event is emitted when a builtin tool is called, but no AG-UI events + are needed at this stage since builtin tool calls are handled in PartStartEvent. + """ + return + yield # Make this an async generator + + async def handle_builtin_tool_result(self, event: BuiltinToolResultEvent) -> AsyncIterator[BaseEvent]: # type: ignore[reportDeprecated] + """Handle a BuiltinToolResultEvent. + + This event is emitted when a builtin tool returns. We need to emit any pending + part_end event (TOOL_CALL_END) before the result is shown in handle_builtin_tool_return. + """ + # Emit any pending part_end event (e.g., TOOL_CALL_END) before the result + if self.part_end: + yield self.part_end + self.part_end = None + + async def handle_final_result(self, event: FinalResultEvent) -> AsyncIterator[BaseEvent]: + """Handle a FinalResultEvent. + + This event is emitted when the agent produces a final result, but no AG-UI events + are needed at this stage. 
+ """ + return + yield # Make this an async generator + + async def handle_run_result(self, event: AgentRunResultEvent) -> AsyncIterator[BaseEvent]: + """Handle an AgentRunResultEvent, cleaning up any pending state.""" + # Emit any pending part end event + if self.part_end: # pragma: no branch + yield self.part_end + self.part_end = None + + # End thinking mode if still active + if self.thinking: + yield ThinkingEndEvent( + type=EventType.THINKING_END, + ) + self.thinking = False + + async def on_validation_error(self, error: Exception) -> AsyncIterator[BaseEvent]: + """Handle validation errors before stream starts.""" + # Try to get code from exception if it has one, otherwise use class name + code = getattr(error, 'code', error.__class__.__name__) + yield RunErrorEvent(message=str(error), code=code) + + async def on_stream_error(self, error: Exception) -> AsyncIterator[BaseEvent]: + """Handle errors during streaming.""" + # Try to get code from exception if it has one, otherwise use class name + code = getattr(error, 'code', error.__class__.__name__) + yield RunErrorEvent(message=str(error), code=code) + + def encode_event(self, event: BaseEvent, accept: str) -> str: + """Encode an AG-UI event as SSE. + + Args: + event: The AG-UI event to encode. + accept: The accept header value for encoding format. + + Returns: + The SSE-formatted string. + """ + from ag_ui.encoder import EventEncoder + + encoder = EventEncoder(accept=accept) + return encoder.encode(event) + + +def protocol_messages_to_pai_messages(messages: list[Message]) -> list[ModelMessage]: + """Convert AG-UI messages to Pydantic AI messages. + + Args: + messages: List of AG-UI messages. + + Returns: + List of Pydantic AI ModelMessage objects. + """ + from ...messages import ( + ModelRequest, + ModelRequestPart, + ModelResponse, + ModelResponsePart, + SystemPromptPart, + UserPromptPart, + ) + + result: list[ModelMessage] = [] + tool_calls: dict[str, str] = {} # Tool call ID to tool name mapping. 
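+    # Group consecutive AG-UI messages into alternating ModelRequest / ModelResponse
+    # containers: user/system/developer messages and external tool results extend the
+    # current request, while assistant messages and builtin tool results extend the
+    # current response.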
+ request_parts: list[ModelRequestPart] | None = None + response_parts: list[ModelResponsePart] | None = None + + for msg in messages: + if isinstance(msg, UserMessage | SystemMessage | DeveloperMessage) or ( + isinstance(msg, ToolMessage) and not msg.tool_call_id.startswith(_BUILTIN_TOOL_CALL_ID_PREFIX) + ): + if request_parts is None: + request_parts = [] + result.append(ModelRequest(parts=request_parts)) + response_parts = None + + if isinstance(msg, UserMessage): + request_parts.append(UserPromptPart(content=msg.content)) + elif isinstance(msg, SystemMessage | DeveloperMessage): + request_parts.append(SystemPromptPart(content=msg.content)) + else: + tool_call_id = msg.tool_call_id + tool_name = tool_calls.get(tool_call_id) + if tool_name is None: # pragma: no cover + raise ValueError(f'Tool call with ID {tool_call_id} not found in the history.') + + request_parts.append( + ToolReturnPart( + tool_name=tool_name, + content=msg.content, + tool_call_id=tool_call_id, + ) + ) + + elif isinstance(msg, AssistantMessage) or ( # pragma: no branch + isinstance(msg, ToolMessage) and msg.tool_call_id.startswith(_BUILTIN_TOOL_CALL_ID_PREFIX) + ): + if response_parts is None: + response_parts = [] + result.append(ModelResponse(parts=response_parts)) + request_parts = None + + if isinstance(msg, AssistantMessage): + if msg.content: + response_parts.append(TextPart(content=msg.content)) + + if msg.tool_calls: + for tool_call in msg.tool_calls: + tool_call_id = tool_call.id + tool_name = tool_call.function.name + tool_calls[tool_call_id] = tool_name + + if tool_call_id.startswith(_BUILTIN_TOOL_CALL_ID_PREFIX): + _, provider_name, tool_call_id = tool_call_id.split('|', 2) + response_parts.append( + BuiltinToolCallPart( + tool_name=tool_name, + args=tool_call.function.arguments, + tool_call_id=tool_call_id, + provider_name=provider_name, + ) + ) + else: + response_parts.append( + ToolCallPart( + tool_name=tool_name, + tool_call_id=tool_call_id, + args=tool_call.function.arguments, + ) + ) + else: + tool_call_id = msg.tool_call_id + tool_name = tool_calls.get(tool_call_id) + if tool_name is None: # pragma: no cover + raise ValueError(f'Tool call with ID {tool_call_id} not found in the history.') + _, provider_name, tool_call_id = tool_call_id.split('|', 2) + + response_parts.append( + BuiltinToolReturnPart( + tool_name=tool_name, + content=msg.content, + tool_call_id=tool_call_id, + provider_name=provider_name, + ) + ) + + return result diff --git a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/__init__.py b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/__init__.py new file mode 100644 index 0000000000..f004e15579 --- /dev/null +++ b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/__init__.py @@ -0,0 +1,94 @@ +"""Vercel AI protocol adapter for Pydantic AI agents. + +This module provides classes for integrating Pydantic AI agents with the Vercel AI protocol, +enabling streaming event-based communication for interactive AI applications. 
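+
+A minimal sketch of how these pieces fit together (assumes an existing `Agent`
+instance; the model name is illustrative):
+
+```python
+from pydantic_ai import Agent
+from pydantic_ai.ui.vercel_ai import VercelAIAdapter
+
+agent = Agent('openai:gpt-4o')
+adapter = VercelAIAdapter(agent)
+```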
+ +Converted to Python from: +https://github.com/vercel/ai/blob/ai%405.0.34/packages/ai/src/ui/ui-messages.ts +""" + +from ._utils import VERCEL_AI_DSP_HEADERS, CamelBaseModel, JSONValue, ProviderMetadata +from .adapter import VercelAIAdapter +from .event_stream import VercelAIEventStream, protocol_messages_to_pai_messages +from .request_types import ( + RequestData, + SubmitMessage, + TextUIPart, + ToolOutputAvailablePart, + UIMessage, + UIPart, + request_data_ta, +) +from .response_types import ( + AbortChunk, + AbstractSSEChunk, + DataUIMessageChunk, + DoneChunk, + ErrorChunk, + FileChunk, + FinishChunk, + FinishStepChunk, + MessageMetadataChunk, + ReasoningDeltaChunk, + ReasoningEndChunk, + ReasoningStartChunk, + SourceDocumentChunk, + SourceUrlChunk, + StartChunk, + StartStepChunk, + TextDeltaChunk, + TextEndChunk, + TextStartChunk, + ToolInputAvailableChunk, + ToolInputDeltaChunk, + ToolInputErrorChunk, + ToolInputStartChunk, + ToolOutputAvailableChunk, + ToolOutputErrorChunk, +) + +__all__ = [ + # Utilities + 'CamelBaseModel', + 'ProviderMetadata', + 'JSONValue', + 'VERCEL_AI_DSP_HEADERS', + # Request types + 'RequestData', + 'TextUIPart', + 'UIMessage', + 'ToolOutputAvailablePart', + 'UIPart', + 'SubmitMessage', + 'request_data_ta', + # Response types + 'AbstractSSEChunk', + 'TextStartChunk', + 'TextDeltaChunk', + 'TextEndChunk', + 'ReasoningStartChunk', + 'ReasoningDeltaChunk', + 'ReasoningEndChunk', + 'ErrorChunk', + 'ToolInputStartChunk', + 'ToolInputDeltaChunk', + 'ToolInputAvailableChunk', + 'ToolInputErrorChunk', + 'ToolOutputAvailableChunk', + 'ToolOutputErrorChunk', + 'SourceUrlChunk', + 'SourceDocumentChunk', + 'FileChunk', + 'DataUIMessageChunk', + 'StartStepChunk', + 'FinishStepChunk', + 'StartChunk', + 'FinishChunk', + 'AbortChunk', + 'MessageMetadataChunk', + 'DoneChunk', + # Event stream and adapter + 'VercelAIEventStream', + 'VercelAIAdapter', + 'protocol_messages_to_pai_messages', +] diff --git a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_utils.py b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_utils.py new file mode 100644 index 0000000000..d3dfb4e8cd --- /dev/null +++ b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_utils.py @@ -0,0 +1,26 @@ +"""Utilities for Vercel AI protocol. 
+ +Converted to Python from: +https://github.com/vercel/ai/blob/ai%405.0.34/packages/ai/src/ui/ui-messages.ts +""" + +from abc import ABC +from typing import Any + +from pydantic import BaseModel, ConfigDict +from pydantic.alias_generators import to_camel + +__all__ = ('CamelBaseModel', 'ProviderMetadata', 'JSONValue', 'VERCEL_AI_DSP_HEADERS') + +# See https://ai-sdk.dev/docs/ai-sdk-ui/stream-protocol#data-stream-protocol +VERCEL_AI_DSP_HEADERS = {'x-vercel-ai-ui-message-stream': 'v1'} + +# Technically this is recursive union of JSON types; for simplicity, we call it Any +JSONValue = Any +ProviderMetadata = dict[str, dict[str, JSONValue]] + + +class CamelBaseModel(BaseModel, ABC): + """Base model with camelCase aliases.""" + + model_config = ConfigDict(alias_generator=to_camel, populate_by_name=True, extra='forbid') diff --git a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/adapter.py b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/adapter.py new file mode 100644 index 0000000000..c38b9e365c --- /dev/null +++ b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/adapter.py @@ -0,0 +1,149 @@ +"""Vercel AI adapter for handling requests.""" + +# pyright: reportGeneralTypeIssues=false + +from __future__ import annotations + +from collections.abc import AsyncIterator +from dataclasses import dataclass +from typing import TYPE_CHECKING, Any + +from ...tools import AgentDepsT +from .event_stream import VercelAIEventStream +from .request_types import RequestData, TextUIPart, UIMessage +from .response_types import AbstractSSEChunk, DoneChunk + +if TYPE_CHECKING: + from ...agent import Agent + +__all__ = ['VercelAIAdapter'] + + +@dataclass +class VercelAIAdapter: + """Adapter for handling Vercel AI protocol requests with Pydantic AI agents. + + This adapter provides a simplified interface for integrating Pydantic AI agents + with the Vercel AI protocol, handling request parsing, message conversion, + and event streaming. + + Example: + ```python + from pydantic_ai import Agent + from pydantic_ai.ui.vercel_ai import VercelAIAdapter + + agent = Agent('openai:gpt-4') + adapter = VercelAIAdapter(agent) + + async def handle_request(data: RequestData, deps=None): + async for chunk in adapter.run_stream(data, deps): + yield chunk.sse() + ``` + """ + + agent: Agent[AgentDepsT] + """The Pydantic AI agent to run.""" + + async def run_stream( + self, request: RequestData, deps: AgentDepsT | None = None + ) -> AsyncIterator[AbstractSSEChunk | DoneChunk]: + """Stream events from an agent run as Vercel AI protocol events. + + Args: + request: The Vercel AI request data. + deps: Optional dependencies to pass to the agent. + + Yields: + Vercel AI protocol events (AbstractSSEChunk or DoneChunk). + + Raises: + ValueError: If request validation fails. 
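+
+        Example (a minimal sketch; assumes `data` is an already-validated
+        `RequestData` instance for this `adapter`):
+
+        ```python
+        async for chunk in adapter.run_stream(data):
+            print(chunk.sse())
+        ```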
+        """
+        # Parse messages from request
+        messages = self.parse_request_messages(request)

+        # Extract prompt from last user message (for now, simple implementation)
+        if not messages:
+            raise ValueError('No messages provided')
+
+        message = messages[-1]
+        prompt_parts: list[str] = []
+        for part in message.parts:
+            if isinstance(part, TextUIPart):
+                prompt_parts.append(part.text)
+            else:
+                raise ValueError(f'Only text parts are supported so far, got {part}')
+
+        # Create event stream
+        event_stream = self.create_event_stream()
+
+        # Run agent and stream events
+        async for event in self.agent.run_stream_events('\n'.join(prompt_parts), deps=deps):
+            async for chunk in event_stream.agent_event_to_events(event):
+                yield chunk
+
+        # Emit after-stream events
+        async for chunk in event_stream.after_stream():
+            yield chunk
+
+    def create_event_stream(self) -> VercelAIEventStream[AgentDepsT]:
+        """Create a new Vercel AI event stream.
+
+        Returns:
+            A VercelAIEventStream instance.
+        """
+        return VercelAIEventStream[AgentDepsT]()
+
+    def parse_request_messages(self, request: RequestData) -> list[UIMessage]:
+        """Extract messages from the Vercel AI request.
+
+        Args:
+            request: The Vercel AI request data.
+
+        Returns:
+            List of UIMessage objects.
+        """
+        return request.messages
+
+    async def dispatch_request(self, request: Any, deps: AgentDepsT | None = None) -> Any:
+        """Handle a request and return a streamed SSE response.
+
+        Args:
+            request: The incoming Starlette/FastAPI request.
+            deps: The dependencies for the agent.
+
+        Returns:
+            A streamed SSE response.
+        """
+        try:
+            from starlette.requests import Request
+            from starlette.responses import JSONResponse
+        except ImportError as e:  # pragma: no cover
+            raise ImportError('Please install starlette to use dispatch_request') from e
+
+        try:
+            from sse_starlette.sse import EventSourceResponse
+        except ImportError as e:  # pragma: no cover
+            raise ImportError('Please install sse_starlette to use dispatch_request') from e
+
+        from pydantic import ValidationError
+
+        if not isinstance(request, Request):  # pragma: no cover
+            raise TypeError(f'Expected Starlette Request, got {type(request).__name__}')
+
+        from .request_types import request_data_ta
+
+        try:
+            # `request.json()` returns the already-parsed JSON body, so validate it
+            # as a Python object rather than as a JSON string.
+            data = request_data_ta.validate_python(await request.json())
+
+            async def run_sse() -> AsyncIterator[str]:
+                async for chunk in self.run_stream(data, deps=deps):
+                    yield chunk.sse()
+
+            from ._utils import VERCEL_AI_DSP_HEADERS
+
+            return EventSourceResponse(run_sse(), headers=VERCEL_AI_DSP_HEADERS)
+        except ValidationError as e:
+            return JSONResponse({'errors': e.errors()}, status_code=422)
+        except Exception as e:
+            return JSONResponse({'errors': str(e)}, status_code=500)
diff --git a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/event_stream.py b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/event_stream.py
new file mode 100644
index 0000000000..465a3bb7e8
--- /dev/null
+++ b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/event_stream.py
@@ -0,0 +1,235 @@
+"""Vercel AI event stream implementation."""
+
+# pyright: reportIncompatibleMethodOverride=false
+
+from __future__ import annotations
+
+from collections.abc import AsyncIterator
+from typing import Any
+
+from pydantic_core import to_json
+
+from ...messages import (
+    BuiltinToolCallEvent,  # type: ignore[reportDeprecated]
+    BuiltinToolCallPart,
+    BuiltinToolResultEvent,  # type: ignore[reportDeprecated]
+    BuiltinToolReturnPart,
+    FinalResultEvent,
+    FunctionToolCallEvent,
+    FunctionToolResultEvent,
+    ModelMessage,
+    RetryPromptPart,
+    TextPart,
+    TextPartDelta,
+ ThinkingPart, + ThinkingPartDelta, + ToolCallPart, + ToolCallPartDelta, + ToolReturnPart, +) +from ...run import AgentRunResultEvent +from ...tools import AgentDepsT +from .. import BaseEventStream +from .request_types import TextUIPart, UIMessage +from .response_types import ( + AbstractSSEChunk, + DoneChunk, + ErrorChunk, + FinishChunk, + ReasoningDeltaChunk, + ReasoningStartChunk, + TextDeltaChunk, + TextStartChunk, + ToolInputDeltaChunk, + ToolInputStartChunk, + ToolOutputAvailableChunk, +) + +__all__ = ['VercelAIEventStream', 'protocol_messages_to_pai_messages'] + + +def _json_dumps(obj: Any) -> str: + """Dump an object to JSON string.""" + return to_json(obj).decode('utf-8') + + +class VercelAIEventStream(BaseEventStream[AbstractSSEChunk | DoneChunk, AgentDepsT]): + """Transforms Pydantic AI agent events into Vercel AI protocol events. + + This class handles the stateful transformation of streaming agent events + into the Vercel AI protocol format, managing message IDs and final result tool tracking. + + Example: + ```python + event_stream = VercelAIEventStream() + async for vercel_event in event_stream.agent_event_to_events(pai_event): + print(vercel_event.sse()) + ``` + """ + + def __init__(self) -> None: + """Initialize Vercel AI event stream state.""" + super().__init__() + self.new_message_id() # Generate a message ID at initialization + self._final_result_tool_id: str | None = None + + # Granular handlers implementation + + async def handle_text_start(self, part: TextPart) -> AsyncIterator[AbstractSSEChunk]: + """Handle a TextPart at start.""" + yield TextStartChunk(id=self.message_id or self.new_message_id()) + if part.content: + yield TextDeltaChunk(id=self.message_id, delta=part.content) + + async def handle_text_delta(self, delta: TextPartDelta) -> AsyncIterator[AbstractSSEChunk]: + """Handle a TextPartDelta.""" + if delta.content_delta: + yield TextDeltaChunk(id=self.message_id, delta=delta.content_delta) + + async def handle_thinking_start(self, part: ThinkingPart) -> AsyncIterator[AbstractSSEChunk]: + """Handle a ThinkingPart at start.""" + if not self.message_id: + self.new_message_id() + yield ReasoningStartChunk(id=self.message_id) + if part.content: + yield ReasoningDeltaChunk(id=self.message_id, delta=part.content) + + async def handle_thinking_delta(self, delta: ThinkingPartDelta) -> AsyncIterator[AbstractSSEChunk]: + """Handle a ThinkingPartDelta.""" + if delta.content_delta: + yield ReasoningDeltaChunk(id=self.message_id, delta=delta.content_delta) + + async def handle_tool_call_start(self, part: ToolCallPart | BuiltinToolCallPart) -> AsyncIterator[AbstractSSEChunk]: + """Handle a ToolCallPart or BuiltinToolCallPart at start.""" + yield ToolInputStartChunk(tool_call_id=part.tool_call_id, tool_name=part.tool_name) + if isinstance(part.args, str): + yield ToolInputDeltaChunk(tool_call_id=part.tool_call_id, input_text_delta=part.args) + elif part.args is not None: + yield ToolInputDeltaChunk(tool_call_id=part.tool_call_id, input_text_delta=_json_dumps(part.args)) + + async def handle_tool_call_delta(self, delta: ToolCallPartDelta) -> AsyncIterator[AbstractSSEChunk]: + """Handle a ToolCallPartDelta.""" + tool_call_id = delta.tool_call_id or '' + if isinstance(delta.args_delta, str): + yield ToolInputDeltaChunk(tool_call_id=tool_call_id, input_text_delta=delta.args_delta) + elif delta.args_delta is not None: + yield ToolInputDeltaChunk(tool_call_id=tool_call_id, input_text_delta=_json_dumps(delta.args_delta)) + + async def handle_builtin_tool_return(self, part: 
BuiltinToolReturnPart) -> AsyncIterator[AbstractSSEChunk]: + """Handle a BuiltinToolReturnPart.""" + yield ToolOutputAvailableChunk(tool_call_id=part.tool_call_id, output=part.content) + + async def handle_function_tool_call(self, event: FunctionToolCallEvent) -> AsyncIterator[AbstractSSEChunk]: + """Handle a FunctionToolCallEvent. + + No Vercel AI events are emitted at this stage since tool calls are handled in PartStartEvent. + """ + return + yield # Make this an async generator + + async def handle_function_tool_result(self, event: FunctionToolResultEvent) -> AsyncIterator[AbstractSSEChunk]: + """Handle a FunctionToolResultEvent, emitting tool result events.""" + result = event.result + if isinstance(result, ToolReturnPart): + yield ToolOutputAvailableChunk(tool_call_id=result.tool_call_id, output=result.content) + elif isinstance(result, RetryPromptPart): + # For retry prompts, emit the error content as tool output + yield ToolOutputAvailableChunk(tool_call_id=result.tool_call_id, output=result.content) + + async def handle_builtin_tool_call(self, event: BuiltinToolCallEvent) -> AsyncIterator[AbstractSSEChunk]: # type: ignore[reportDeprecated] + """Handle a BuiltinToolCallEvent, emitting tool input events.""" + part = event.part + yield ToolInputStartChunk(tool_call_id=part.tool_call_id, tool_name=part.tool_name) + if isinstance(part.args, str): + yield ToolInputDeltaChunk(tool_call_id=part.tool_call_id, input_text_delta=part.args) + elif part.args is not None: + yield ToolInputDeltaChunk(tool_call_id=part.tool_call_id, input_text_delta=_json_dumps(part.args)) + + async def handle_builtin_tool_result(self, event: BuiltinToolResultEvent) -> AsyncIterator[AbstractSSEChunk]: # type: ignore[reportDeprecated] + """Handle a BuiltinToolResultEvent, emitting tool output events.""" + result = event.result + yield ToolOutputAvailableChunk(tool_call_id=result.tool_call_id, output=result.content) + + async def handle_final_result(self, event: FinalResultEvent) -> AsyncIterator[AbstractSSEChunk]: + """Handle a FinalResultEvent, tracking the final result tool.""" + if event.tool_call_id and event.tool_name: + self._final_result_tool_id = event.tool_call_id + yield ToolInputStartChunk(tool_call_id=event.tool_call_id, tool_name=event.tool_name) + + async def handle_run_result(self, event: AgentRunResultEvent) -> AsyncIterator[AbstractSSEChunk]: + """Handle an AgentRunResultEvent. + + No additional Vercel AI events are emitted at this stage. + """ + return + yield # Make this an async generator + + async def after_stream(self) -> AsyncIterator[AbstractSSEChunk | DoneChunk]: + """Yield events after agent streaming completes.""" + # Close the final result tool if there was one + if tool_call_id := self._final_result_tool_id: + yield ToolOutputAvailableChunk(tool_call_id=tool_call_id, output=None) + yield FinishChunk() + yield DoneChunk() + + async def on_validation_error(self, error: Exception) -> AsyncIterator[AbstractSSEChunk]: + """Handle validation errors before stream starts.""" + yield ErrorChunk(error_text=str(error)) + + async def on_stream_error(self, error: Exception) -> AsyncIterator[AbstractSSEChunk]: + """Handle errors during streaming.""" + yield ErrorChunk(error_text=str(error)) + + +def protocol_messages_to_pai_messages(messages: list[UIMessage]) -> list[ModelMessage]: + """Convert Vercel AI protocol messages to Pydantic AI messages. + + Args: + messages: List of Vercel AI UIMessage objects. + + Returns: + List of Pydantic AI ModelMessage objects. 
+ + Raises: + ValueError: If message format is not supported. + """ + from ...messages import ModelRequest, ModelResponse, SystemPromptPart, TextPart, UserPromptPart + + pai_messages: list[ModelMessage] = [] + + for msg in messages: + if msg.role == 'user': + # User message - extract text from parts + texts: list[str] = [] + for part in msg.parts: + if isinstance(part, TextUIPart): + texts.append(part.text) + else: + raise ValueError(f'Only text parts are supported for user messages, got {type(part).__name__}') + + if texts: + pai_messages.append(ModelRequest(parts=[UserPromptPart(content='\n'.join(texts))])) + + elif msg.role == 'assistant': + # Assistant message - for now, just extract text + # Full reconstruction of ModelResponse with tool calls would require more complex logic + texts: list[str] = [] + for part in msg.parts: + if isinstance(part, TextUIPart): + texts.append(part.text) + # TODO: Handle ToolOutputAvailablePart for full message history reconstruction + + if texts: + pai_messages.append(ModelResponse(parts=[TextPart(content='\n'.join(texts))])) + + elif msg.role == 'system': + # System message - not in standard Vercel AI protocol but might be custom + texts: list[str] = [] + for part in msg.parts: + if isinstance(part, TextUIPart): + texts.append(part.text) + + if texts: + pai_messages.append(ModelRequest(parts=[SystemPromptPart(content='\n'.join(texts))])) + + return pai_messages diff --git a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/request_types.py b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/request_types.py new file mode 100644 index 0000000000..ea30436378 --- /dev/null +++ b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/request_types.py @@ -0,0 +1,70 @@ +"""Vercel AI request types (UI messages). + +Converted to Python from: +https://github.com/vercel/ai/blob/ai%405.0.34/packages/ai/src/ui/ui-messages.ts +""" + +from typing import Any, Literal + +from pydantic import TypeAdapter + +from ._utils import CamelBaseModel, ProviderMetadata + +__all__ = [ + 'TextUIPart', + 'ToolOutputAvailablePart', + 'UIPart', + 'UIMessage', + 'SubmitMessage', + 'RequestData', + 'request_data_ta', +] + + +class TextUIPart(CamelBaseModel): + """A text part of a message.""" + + type: Literal['text'] = 'text' + text: str + state: Literal['streaming', 'done'] | None = None + provider_metadata: ProviderMetadata | None = None + + +class ToolOutputAvailablePart(CamelBaseModel): + """Tool output available part.""" + + type: str # f"tool-{tool_name}" + tool_call_id: str + state: Literal['output-available'] = 'output-available' + input: Any + output: Any + provider_executed: bool | None = None + call_provider_metadata: ProviderMetadata | None = None + preliminary: bool | None = None + + +# Since ToolOutputAvailablePart has a dynamic type field, we can't use Discriminator +UIPart = TextUIPart | ToolOutputAvailablePart + + +class UIMessage(CamelBaseModel): + """A message in the UI protocol.""" + + id: str + role: Literal['user', 'assistant', 'system'] + metadata: dict[str, Any] | None = None + parts: list[UIPart] + + +class SubmitMessage(CamelBaseModel): + """Submit message request.""" + + trigger: Literal['submit-message'] = 'submit-message' + id: str + messages: list[UIMessage] + + +RequestData = SubmitMessage + +# Type adapter for parsing requests +request_data_ta: TypeAdapter[RequestData] = TypeAdapter(RequestData) diff --git a/pydantic_ai_slim/pydantic_ai/vercel_ai/response_types.py b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/response_types.py similarity index 81% rename from 
pydantic_ai_slim/pydantic_ai/vercel_ai/response_types.py rename to pydantic_ai_slim/pydantic_ai/ui/vercel_ai/response_types.py index 8538dd9680..09b82c3a99 100644 --- a/pydantic_ai_slim/pydantic_ai/vercel_ai/response_types.py +++ b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/response_types.py @@ -1,19 +1,47 @@ -"""Convert to Python from. +"""Vercel AI response types (SSE chunks). +Converted to Python from: https://github.com/vercel/ai/blob/ai%405.0.34/packages/ai/src/ui/ui-messages.ts - -Mostly with Claude. """ from typing import Any, Literal from ._utils import CamelBaseModel, ProviderMetadata +__all__ = [ + 'AbstractSSEChunk', + 'TextStartChunk', + 'TextDeltaChunk', + 'TextEndChunk', + 'ReasoningStartChunk', + 'ReasoningDeltaChunk', + 'ReasoningEndChunk', + 'ErrorChunk', + 'ToolInputStartChunk', + 'ToolInputDeltaChunk', + 'ToolInputAvailableChunk', + 'ToolInputErrorChunk', + 'ToolOutputAvailableChunk', + 'ToolOutputErrorChunk', + 'SourceUrlChunk', + 'SourceDocumentChunk', + 'FileChunk', + 'DataUIMessageChunk', + 'StartStepChunk', + 'FinishStepChunk', + 'StartChunk', + 'FinishChunk', + 'AbortChunk', + 'MessageMetadataChunk', + 'DoneChunk', +] + class AbstractSSEChunk(CamelBaseModel): - """Abstract base class for response SSE even.""" + """Abstract base class for response SSE events.""" def sse(self) -> str: + """Encode as SSE format.""" return self.model_dump_json(exclude_none=True, by_alias=True) @@ -74,29 +102,22 @@ class ErrorChunk(AbstractSSEChunk): error_text: str -class ToolInputAvailableChunk(AbstractSSEChunk): - """Tool input available chunk.""" +class ToolInputStartChunk(AbstractSSEChunk): + """Tool input start chunk.""" - type: Literal['tool-input-available'] = 'tool-input-available' + type: Literal['tool-input-start'] = 'tool-input-start' tool_call_id: str tool_name: str - input: Any provider_executed: bool | None = None - provider_metadata: ProviderMetadata | None = None dynamic: bool | None = None -class ToolInputErrorChunk(AbstractSSEChunk): - """Tool input error chunk.""" +class ToolInputDeltaChunk(AbstractSSEChunk): + """Tool input delta chunk.""" - type: Literal['tool-input-error'] = 'tool-input-error' + type: Literal['tool-input-delta'] = 'tool-input-delta' tool_call_id: str - tool_name: str - input: Any - provider_executed: bool | None = None - provider_metadata: ProviderMetadata | None = None - dynamic: bool | None = None - error_text: str + input_text_delta: str class ToolOutputAvailableChunk(AbstractSSEChunk): @@ -110,35 +131,48 @@ class ToolOutputAvailableChunk(AbstractSSEChunk): preliminary: bool | None = None -class ToolOutputErrorChunk(AbstractSSEChunk): - """Tool output error chunk.""" +class FinishChunk(AbstractSSEChunk): + """Finish chunk.""" - type: Literal['tool-output-error'] = 'tool-output-error' + type: Literal['finish'] = 'finish' + message_metadata: Any | None = None + + +class ToolInputAvailableChunk(AbstractSSEChunk): + """Tool input available chunk.""" + + type: Literal['tool-input-available'] = 'tool-input-available' tool_call_id: str - error_text: str + tool_name: str + input: Any provider_executed: bool | None = None + provider_metadata: ProviderMetadata | None = None dynamic: bool | None = None -class ToolInputStartChunk(AbstractSSEChunk): - """Tool input start chunk.""" +class ToolInputErrorChunk(AbstractSSEChunk): + """Tool input error chunk.""" - type: Literal['tool-input-start'] = 'tool-input-start' + type: Literal['tool-input-error'] = 'tool-input-error' tool_call_id: str tool_name: str + input: Any provider_executed: bool | None = None + 
provider_metadata: ProviderMetadata | None = None dynamic: bool | None = None + error_text: str -class ToolInputDeltaChunk(AbstractSSEChunk): - """Tool input delta chunk.""" +class ToolOutputErrorChunk(AbstractSSEChunk): + """Tool output error chunk.""" - type: Literal['tool-input-delta'] = 'tool-input-delta' + type: Literal['tool-output-error'] = 'tool-output-error' tool_call_id: str - input_text_delta: str + error_text: str + provider_executed: bool | None = None + dynamic: bool | None = None -# Source chunk types class SourceUrlChunk(AbstractSSEChunk): """Source URL chunk.""" @@ -187,7 +221,6 @@ class FinishStepChunk(AbstractSSEChunk): type: Literal['finish-step'] = 'finish-step' -# Message lifecycle chunk types class StartChunk(AbstractSSEChunk): """Start chunk.""" @@ -196,13 +229,6 @@ class StartChunk(AbstractSSEChunk): message_metadata: Any | None = None -class FinishChunk(AbstractSSEChunk): - """Finish chunk.""" - - type: Literal['finish'] = 'finish' - message_metadata: Any | None = None - - class AbortChunk(AbstractSSEChunk): """Abort chunk.""" @@ -214,3 +240,17 @@ class MessageMetadataChunk(AbstractSSEChunk): type: Literal['message-metadata'] = 'message-metadata' message_metadata: Any + + +class DoneChunk: + """Special marker chunk to indicate the end of the SSE stream.""" + + def sse(self) -> str: + """Encode as SSE done marker.""" + return '[DONE]' + + def __str__(self) -> str: + return 'DoneChunk' + + def __eq__(self, other: Any) -> bool: + return isinstance(other, DoneChunk) diff --git a/pydantic_ai_slim/pydantic_ai/vercel_ai/__init__.py b/pydantic_ai_slim/pydantic_ai/vercel_ai/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/pydantic_ai_slim/pydantic_ai/vercel_ai/_utils.py b/pydantic_ai_slim/pydantic_ai/vercel_ai/_utils.py deleted file mode 100644 index 6ef877a235..0000000000 --- a/pydantic_ai_slim/pydantic_ai/vercel_ai/_utils.py +++ /dev/null @@ -1,18 +0,0 @@ -from abc import ABC -from typing import Any - -from pydantic import BaseModel, ConfigDict -from pydantic.alias_generators import to_camel - -__all__ = 'ProviderMetadata', 'CamelBaseModel' - -# technically this is recursive union of JSON types -# for to simplify validation, we call it Any -JSONValue = Any - -# Provider metadata types -ProviderMetadata = dict[str, dict[str, JSONValue]] - - -class CamelBaseModel(BaseModel, ABC): - model_config = ConfigDict(alias_generator=to_camel, populate_by_name=True, extra='forbid') diff --git a/pydantic_ai_slim/pydantic_ai/vercel_ai/request_types.py b/pydantic_ai_slim/pydantic_ai/vercel_ai/request_types.py deleted file mode 100644 index 4753ca90f5..0000000000 --- a/pydantic_ai_slim/pydantic_ai/vercel_ai/request_types.py +++ /dev/null @@ -1,274 +0,0 @@ -"""Convert to Python from. - -https://github.com/vercel/ai/blob/ai%405.0.34/packages/ai/src/ui/ui-messages.ts - -Mostly with Claude. 
-""" - -from typing import Annotated, Any, Literal - -from pydantic import Discriminator, TypeAdapter - -from ._utils import CamelBaseModel, ProviderMetadata - - -class TextUIPart(CamelBaseModel): - """A text part of a message.""" - - type: Literal['text'] = 'text' - - text: str - """The text content.""" - - state: Literal['streaming', 'done'] | None = None - """The state of the text part.""" - - provider_metadata: ProviderMetadata | None = None - """The provider metadata.""" - - -class ReasoningUIPart(CamelBaseModel): - """A reasoning part of a message.""" - - type: Literal['reasoning'] = 'reasoning' - - text: str - """The reasoning text.""" - - state: Literal['streaming', 'done'] | None = None - """The state of the reasoning part.""" - - provider_metadata: ProviderMetadata | None = None - """The provider metadata.""" - - -class SourceUrlUIPart(CamelBaseModel): - """A source part of a message.""" - - type: Literal['source-url'] = 'source-url' - source_id: str - url: str - title: str | None = None - provider_metadata: ProviderMetadata | None = None - - -class SourceDocumentUIPart(CamelBaseModel): - """A document source part of a message.""" - - type: Literal['source-document'] = 'source-document' - source_id: str - media_type: str - title: str - filename: str | None = None - provider_metadata: ProviderMetadata | None = None - - -class FileUIPart(CamelBaseModel): - """A file part of a message.""" - - type: Literal['file'] = 'file' - - media_type: str - """ - IANA media type of the file. - - @see https://www.iana.org/assignments/media-types/media-types.xhtml - """ - - filename: str | None = None - """Optional filename of the file.""" - - url: str - """ - The URL of the file. - It can either be a URL to a hosted file or a [Data URL](https://developer.mozilla.org/en-US/docs/Web/HTTP/Basics_of_HTTP/Data_URLs). 
- """ - - provider_metadata: ProviderMetadata | None = None - """The provider metadata.""" - - -class StepStartUIPart(CamelBaseModel): - """A step boundary part of a message.""" - - type: Literal['step-start'] = 'step-start' - - -class DataUIPart(CamelBaseModel): - """Data part with dynamic type based on data name.""" - - type: str # Will be f"data-{NAME}" - id: str | None = None - data: Any - - -# Tool part states as separate models -class ToolInputStreamingPart(CamelBaseModel): - """Tool part in input-streaming state.""" - - type: str # Will be f"tool-{NAME}" - tool_call_id: str - state: Literal['input-streaming'] = 'input-streaming' - input: Any | None = None - provider_executed: bool | None = None - - -class ToolInputAvailablePart(CamelBaseModel): - """Tool part in input-available state.""" - - type: str # Will be f"tool-{NAME}" - tool_call_id: str - state: Literal['input-available'] = 'input-available' - input: Any - provider_executed: bool | None = None - call_provider_metadata: ProviderMetadata | None = None - - -class ToolOutputAvailablePart(CamelBaseModel): - """Tool part in output-available state.""" - - type: str # Will be f"tool-{NAME}" - tool_call_id: str - state: Literal['output-available'] = 'output-available' - input: Any - output: Any - provider_executed: bool | None = None - call_provider_metadata: ProviderMetadata | None = None - preliminary: bool | None = None - - -class ToolOutputErrorPart(CamelBaseModel): - """Tool part in output-error state.""" - - type: str # Will be f"tool-{NAME}" - tool_call_id: str - state: Literal['output-error'] = 'output-error' - input: Any | None = None - raw_input: Any | None = None - error_text: str - provider_executed: bool | None = None - call_provider_metadata: ProviderMetadata | None = None - - -# Union of all tool part states -ToolUIPart = ToolInputStreamingPart | ToolInputAvailablePart | ToolOutputAvailablePart | ToolOutputErrorPart - - -# Dynamic tool part states as separate models -class DynamicToolInputStreamingPart(CamelBaseModel): - """Dynamic tool part in input-streaming state.""" - - type: Literal['dynamic-tool'] = 'dynamic-tool' - tool_name: str - tool_call_id: str - state: Literal['input-streaming'] = 'input-streaming' - input: Any | None = None - - -class DynamicToolInputAvailablePart(CamelBaseModel): - """Dynamic tool part in input-available state.""" - - type: Literal['dynamic-tool'] = 'dynamic-tool' - tool_name: str - tool_call_id: str - state: Literal['input-available'] = 'input-available' - input: Any - call_provider_metadata: ProviderMetadata | None = None - - -class DynamicToolOutputAvailablePart(CamelBaseModel): - """Dynamic tool part in output-available state.""" - - type: Literal['dynamic-tool'] = 'dynamic-tool' - tool_name: str - tool_call_id: str - state: Literal['output-available'] = 'output-available' - input: Any - output: Any - call_provider_metadata: ProviderMetadata | None = None - preliminary: bool | None = None - - -class DynamicToolOutputErrorPart(CamelBaseModel): - """Dynamic tool part in output-error state.""" - - type: Literal['dynamic-tool'] = 'dynamic-tool' - tool_name: str - tool_call_id: str - state: Literal['output-error'] = 'output-error' - input: Any - error_text: str - call_provider_metadata: ProviderMetadata | None = None - - -# Union of all dynamic tool part states -DynamicToolUIPart = ( - DynamicToolInputStreamingPart - | DynamicToolInputAvailablePart - | DynamicToolOutputAvailablePart - | DynamicToolOutputErrorPart -) - - -UIMessagePart = ( - TextUIPart - | ReasoningUIPart - | ToolUIPart - 
| DynamicToolUIPart - | SourceUrlUIPart - | SourceDocumentUIPart - | FileUIPart - | DataUIPart - | StepStartUIPart -) -"""Union of all message part types.""" - - -class UIMessage(CamelBaseModel): - """A message as displayed in the UI by Vercel AI Elements.""" - - id: str - """A unique identifier for the message.""" - - role: Literal['system', 'user', 'assistant'] - """The role of the message.""" - - metadata: Any | None = None - """The metadata of the message.""" - - parts: list[UIMessagePart] - """ - The parts of the message. Use this for rendering the message in the UI. - - System messages should be avoided (set the system prompt on the server instead). - They can have text parts. - - User messages can have text parts and file parts. - - Assistant messages can have text, reasoning, tool invocation, and file parts. - """ - - -class SubmitMessage(CamelBaseModel, extra='allow'): - """Submit a message to the agent.""" - - trigger: Literal['submit-message'] - id: str - messages: list[UIMessage] - - # TODO (DouweM): Update, make variable? I like `inference_params` from OpenAI ChatKit. - # model: str - # web_search: bool - - -class RegenerateMessage(CamelBaseModel): - """Ask the agent to regenerate a message.""" - - trigger: Literal['regenerate-message'] - id: str - messages: list[UIMessage] - message_id: str - - -RequestData = Annotated[SubmitMessage | RegenerateMessage, Discriminator('trigger')] -request_data_ta: TypeAdapter[RequestData] = TypeAdapter(RequestData) diff --git a/pydantic_ai_slim/pydantic_ai/vercel_ai/response_stream.py b/pydantic_ai_slim/pydantic_ai/vercel_ai/response_stream.py deleted file mode 100644 index b721192f03..0000000000 --- a/pydantic_ai_slim/pydantic_ai/vercel_ai/response_stream.py +++ /dev/null @@ -1,122 +0,0 @@ -from __future__ import annotations as _annotations - -from collections.abc import AsyncIterator -from dataclasses import dataclass, field -from typing import Any -from uuid import uuid4 - -from pydantic_core import to_json - -from .. import messages -from . import response_types as _t - -__all__ = 'VERCEL_AI_DSP_HEADERS', 'EventStreamer' - -# See https://ai-sdk.dev/docs/ai-sdk-ui/stream-protocol#data-stream-protocol -VERCEL_AI_DSP_HEADERS = {'x-vercel-ai-ui-message-stream': 'v1'} - - -@dataclass -class EventStreamer: - """Logic for mapping pydantic-ai events to Vercel AI Elements events which can be streamed to a client over SSE.""" - - message_id: str = field(default_factory=lambda: uuid4().hex) - _final_result_tool_id: str | None = field(default=None, init=False) - - async def event_to_chunks(self, event: messages.AgentStreamEvent) -> AsyncIterator[_t.AbstractSSEChunk]: # noqa C901 - """Convert pydantic-ai events to Vercel AI Elements events which can be streamed to a client over SSE. - - Args: - event: The pydantic-ai event to convert. - - Yields: - An async iterator of Vercel AI Elements events. 
- """ - match event: - case messages.PartStartEvent(part=part): - match part: - case messages.TextPart(content=content): - yield _t.TextStartChunk(id=self.message_id) - yield _t.TextDeltaChunk(id=self.message_id, delta=content) - case ( - messages.ToolCallPart(tool_name=tool_name, tool_call_id=tool_call_id, args=args) - | messages.BuiltinToolCallPart(tool_name=tool_name, tool_call_id=tool_call_id, args=args) - ): - yield _t.ToolInputStartChunk(tool_call_id=tool_call_id, tool_name=tool_name) - if isinstance(args, str): - yield _t.ToolInputDeltaChunk(tool_call_id=tool_call_id, input_text_delta=args) - elif args is not None: - yield ( - _t.ToolInputDeltaChunk(tool_call_id=tool_call_id, input_text_delta=_json_dumps(args)) - ) - - case messages.BuiltinToolReturnPart( - tool_name=tool_name, tool_call_id=tool_call_id, content=content - ): - yield _t.ToolOutputAvailableChunk(tool_call_id=tool_call_id, output=content) - - case messages.ThinkingPart(content=content): - yield _t.ReasoningStartChunk(id=self.message_id) - yield _t.ReasoningDeltaChunk(id=self.message_id, delta=content) - - case messages.PartDeltaEvent(delta=delta): - match delta: - case messages.TextPartDelta(content_delta=content_delta): - yield _t.TextDeltaChunk(id=self.message_id, delta=content_delta) - case messages.ThinkingPartDelta(content_delta=content_delta): - if content_delta: - yield _t.ReasoningDeltaChunk(id=self.message_id, delta=content_delta) - case messages.ToolCallPartDelta(args_delta=args, tool_call_id=tool_call_id): - tool_call_id = tool_call_id or '' - if isinstance(args, str): - yield _t.ToolInputDeltaChunk(tool_call_id=tool_call_id, input_text_delta=args) - elif args is not None: - yield ( - _t.ToolInputDeltaChunk(tool_call_id=tool_call_id, input_text_delta=_json_dumps(args)) - ) - case messages.FinalResultEvent(tool_name=tool_name, tool_call_id=tool_call_id): - if tool_call_id and tool_name: - self._final_result_tool_id = tool_call_id - yield _t.ToolInputStartChunk(tool_call_id=tool_call_id, tool_name=tool_name) - case messages.FunctionToolCallEvent(): - pass - # print(f'TODO FunctionToolCallEvent {part}') - case messages.FunctionToolResultEvent(result=result): - match result: - case messages.ToolReturnPart(tool_name=tool_name, tool_call_id=tool_call_id, content=content): - yield _t.ToolOutputAvailableChunk(tool_call_id=tool_call_id, output=content) - case messages.RetryPromptPart(tool_name=tool_name, tool_call_id=tool_call_id, content=content): - yield _t.ToolOutputAvailableChunk(tool_call_id=tool_call_id, output=content) - case messages.BuiltinToolCallEvent(part=part): - tool_call_id = part.tool_call_id - tool_name = part.tool_name - args = part.args - yield _t.ToolInputStartChunk(tool_call_id=tool_call_id, tool_name=tool_name) - if isinstance(args, str): - yield _t.ToolInputDeltaChunk(tool_call_id=tool_call_id, input_text_delta=args) - elif args is not None: - yield _t.ToolInputDeltaChunk(tool_call_id=tool_call_id, input_text_delta=_json_dumps(args)) - case messages.BuiltinToolResultEvent(result=result): - yield _t.ToolOutputAvailableChunk(tool_call_id=result.tool_call_id, output=result.content) - - async def finish(self) -> AsyncIterator[_t.AbstractSSEChunk | DoneChunk]: - """Send extra messages required to close off the stream.""" - if tool_call_id := self._final_result_tool_id: - yield _t.ToolOutputAvailableChunk(tool_call_id=tool_call_id, output=None) - yield _t.FinishChunk() - yield DoneChunk() - - -class DoneChunk: - def sse(self) -> str: - return '[DONE]' - - def __str__(self) -> str: - return 
'DoneChunk' - - def __eq__(self, other: Any) -> bool: - return isinstance(other, DoneChunk) - - -def _json_dumps(obj: Any) -> str: - return to_json(obj).decode('utf-8') diff --git a/pydantic_ai_slim/pydantic_ai/vercel_ai/starlette.py b/pydantic_ai_slim/pydantic_ai/vercel_ai/starlette.py deleted file mode 100644 index 62e0a68964..0000000000 --- a/pydantic_ai_slim/pydantic_ai/vercel_ai/starlette.py +++ /dev/null @@ -1,85 +0,0 @@ -from collections.abc import AsyncIterator -from dataclasses import dataclass -from typing import Generic - -from pydantic import ValidationError - -from ..agent import Agent -from ..run import AgentRunResultEvent -from ..tools import AgentDepsT -from .request_types import RequestData, TextUIPart, request_data_ta -from .response_stream import VERCEL_AI_DSP_HEADERS, DoneChunk, EventStreamer -from .response_types import AbstractSSEChunk - -try: - from sse_starlette.sse import EventSourceResponse - from starlette.requests import Request - from starlette.responses import JSONResponse, Response -except ImportError as e: - raise ImportError('To use Vercel AI Elements, please install starlette and sse_starlette') from e - - -@dataclass -class StarletteChat(Generic[AgentDepsT]): - """Starlette support for Pydantic AI's Vercel AI Elements integration. - - This can be used with either FastAPI or Starlette apps. - """ - - agent: Agent[AgentDepsT] - - async def dispatch_request(self, request: Request, deps: AgentDepsT) -> Response: - """Handle a request and return a streamed SSE response. - - Args: - request: The incoming Starlette/FastAPI request. - deps: The dependencies for the agent. - - Returns: - A streamed SSE response. - """ - try: - data = request_data_ta.validate_json(await request.json()) - - async def run_sse() -> AsyncIterator[str]: - async for chunk in self.run(data, deps=deps): - yield chunk.sse() - - return EventSourceResponse(run_sse(), headers=VERCEL_AI_DSP_HEADERS) - except ValidationError as e: - return JSONResponse({'errors': e.errors()}, status_code=422) - except Exception as e: - return JSONResponse({'errors': str(e)}, status_code=500) - - async def run(self, data: RequestData, deps: AgentDepsT = None) -> AsyncIterator[AbstractSSEChunk | DoneChunk]: - """Stream events from an agent run as Vercel AI Elements events. - - Args: - data: The data to run the agent with. - deps: The dependencies to pass to the agent. - - Yields: - An async iterator text lines to stream over SSE. 
- """ - # TODO (DouweM): Use .model and .builtin_tools - - # TODO: Use entire message history - - if not data.messages: - raise ValueError('no messages provided') - - message = data.messages[-1] - prompt: list[str] = [] - for part in message.parts: - if isinstance(part, TextUIPart): - prompt.append(part.text) - else: - raise ValueError(f'Only text parts are supported yet, got {part}') - - event_streamer = EventStreamer() - async for event in self.agent.run_stream_events('\n'.join(prompt), deps=deps): - if not isinstance(event, AgentRunResultEvent): - async for chunk in event_streamer.event_to_chunks(event): - yield chunk - async for chunk in event_streamer.finish(): - yield chunk diff --git a/tests/test_ag_ui.py b/tests/test_ag_ui.py index cf321274b7..bcb57d9158 100644 --- a/tests/test_ag_ui.py +++ b/tests/test_ag_ui.py @@ -2,7 +2,6 @@ from __future__ import annotations -import contextlib import json import uuid from collections.abc import AsyncIterator @@ -47,10 +46,11 @@ from pydantic_ai.output import OutputDataT from pydantic_ai.tools import AgentDepsT, ToolDefinition -from .conftest import IsDatetime, IsSameStr +from .conftest import IsDatetime, IsSameStr, try_import has_ag_ui: bool = False -with contextlib.suppress(ImportError): +with try_import() as imports_successful: + has_ag_ui = imports_successful() from ag_ui.core import ( AssistantMessage, CustomEvent, @@ -72,16 +72,14 @@ SSE_CONTENT_TYPE, OnCompleteFunc, StateDeps, - _messages_from_ag_ui, # type: ignore[reportPrivateUsage] run_ag_ui, ) - - has_ag_ui = True + from pydantic_ai.ui.ag_ui.event_stream import protocol_messages_to_pai_messages pytestmark = [ pytest.mark.anyio, - pytest.mark.skipif(not has_ag_ui, reason='ag-ui-protocol not installed'), + pytest.mark.skipif(not imports_successful, reason='ag-ui-protocol not installed'), pytest.mark.filterwarnings( 'ignore:`BuiltinToolCallEvent` is deprecated, look for `PartStartEvent` and `PartDeltaEvent` with `BuiltinToolCallPart` instead.:DeprecationWarning' ), @@ -1367,7 +1365,7 @@ def error_callback(run_result: AgentRunResult[Any]) -> None: assert any(event['type'] == 'RUN_ERROR' for event in events) -async def test_messages_from_ag_ui() -> None: +async def test_protocol_messages_to_pai_messages() -> None: messages = [ SystemMessage( id='msg_1', @@ -1450,7 +1448,7 @@ async def test_messages_from_ag_ui() -> None: ), ] - assert _messages_from_ag_ui(messages) == snapshot( + assert protocol_messages_to_pai_messages(messages) == snapshot( [ ModelRequest( parts=[ diff --git a/tests/test_vercel_ai.py b/tests/test_vercel_ai.py index 9ac0fdc908..be55bcccec 100644 --- a/tests/test_vercel_ai.py +++ b/tests/test_vercel_ai.py @@ -7,13 +7,15 @@ from pydantic_ai.builtin_tools import WebSearchTool from pydantic_ai.models.openai import OpenAIResponsesModel from pydantic_ai.providers.openai import OpenAIProvider -from pydantic_ai.vercel_ai.request_types import ( +from pydantic_ai.ui.vercel_ai import VercelAIAdapter +from pydantic_ai.ui.vercel_ai.request_types import ( SubmitMessage, TextUIPart, ToolOutputAvailablePart, UIMessage, ) -from pydantic_ai.vercel_ai.response_types import ( +from pydantic_ai.ui.vercel_ai.response_types import ( + DoneChunk, FinishChunk, ReasoningDeltaChunk, ReasoningStartChunk, @@ -23,7 +25,6 @@ ToolInputStartChunk, ToolOutputAvailableChunk, ) -from pydantic_ai.vercel_ai.starlette import DoneChunk, StarletteChat from .conftest import IsStr @@ -42,7 +43,7 @@ async def test_run(allow_model_requests: None, openai_api_key: str): model = OpenAIResponsesModel('gpt-5', 
provider=OpenAIProvider(api_key=openai_api_key)) agent = Agent(model=model, builtin_tools=[WebSearchTool()]) - chat = StarletteChat(agent) + adapter = VercelAIAdapter(agent) data = SubmitMessage( trigger='submit-message', @@ -151,7 +152,7 @@ async def test_run(allow_model_requests: None, openai_api_key: str): ], ) - events = [event async for event in chat.run(data, None)] + events = [event async for event in adapter.run_stream(data, None)] assert events == snapshot( [ ReasoningStartChunk(id='d775971d84c848228275a25a097b6409'), From 6cae960c68e0a5f7adb641d45040459fdc1e13f2 Mon Sep 17 00:00:00 2001 From: Douwe Maan Date: Thu, 9 Oct 2025 16:30:43 +0000 Subject: [PATCH 05/10] Flesh out Adapter and EventStream --- examples/pydantic_ai_examples/chat_app.py | 7 +- pydantic_ai_slim/pydantic_ai/_agent_graph.py | 3 + pydantic_ai_slim/pydantic_ai/ag_ui.py | 118 +- pydantic_ai_slim/pydantic_ai/ui/__init__.py | 451 +------ pydantic_ai_slim/pydantic_ai/ui/adapter.py | 370 ++++++ .../pydantic_ai/ui/ag_ui/__init__.py | 8 +- .../pydantic_ai/ui/ag_ui/_adapter.py | 248 ++++ .../pydantic_ai/ui/ag_ui/_event_stream.py | 274 +++++ .../pydantic_ai/ui/ag_ui/adapter.py | 334 ----- .../pydantic_ai/ui/ag_ui/event_stream.py | 548 --------- .../pydantic_ai/ui/event_stream.py | 377 ++++++ .../pydantic_ai/ui/vercel_ai/__init__.py | 17 +- .../pydantic_ai/ui/vercel_ai/_adapter.py | 183 +++ .../{event_stream.py => _event_stream.py} | 137 +-- .../{request_types.py => _request_types.py} | 0 .../{response_types.py => _response_types.py} | 69 +- .../pydantic_ai/ui/vercel_ai/adapter.py | 149 --- tests/test_ag_ui.py | 52 +- tests/test_vercel_ai.py | 1081 +++++++++-------- 19 files changed, 2153 insertions(+), 2273 deletions(-) create mode 100644 pydantic_ai_slim/pydantic_ai/ui/adapter.py create mode 100644 pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py create mode 100644 pydantic_ai_slim/pydantic_ai/ui/ag_ui/_event_stream.py delete mode 100644 pydantic_ai_slim/pydantic_ai/ui/ag_ui/adapter.py delete mode 100644 pydantic_ai_slim/pydantic_ai/ui/ag_ui/event_stream.py create mode 100644 pydantic_ai_slim/pydantic_ai/ui/event_stream.py create mode 100644 pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_adapter.py rename pydantic_ai_slim/pydantic_ai/ui/vercel_ai/{event_stream.py => _event_stream.py} (61%) rename pydantic_ai_slim/pydantic_ai/ui/vercel_ai/{request_types.py => _request_types.py} (100%) rename pydantic_ai_slim/pydantic_ai/ui/vercel_ai/{response_types.py => _response_types.py} (74%) delete mode 100644 pydantic_ai_slim/pydantic_ai/ui/vercel_ai/adapter.py diff --git a/examples/pydantic_ai_examples/chat_app.py b/examples/pydantic_ai_examples/chat_app.py index 4e2f29ddf7..ea37a576e5 100644 --- a/examples/pydantic_ai_examples/chat_app.py +++ b/examples/pydantic_ai_examples/chat_app.py @@ -16,7 +16,7 @@ from fastapi import Depends, Request, Response from pydantic_ai import Agent, RunContext -from pydantic_ai.vercel_ai.starlette import StarletteChat +from pydantic_ai.ui.vercel_ai import VercelAIAdapter from .sqlite_database import Database @@ -80,7 +80,6 @@ async def retrieve_memories(ctx: RunContext[Deps], memory_contains: str) -> str: return '\n'.join([row[0] for row in rows]) -starlette_chat = StarletteChat(chat_agent) app = fastapi.FastAPI(lifespan=lifespan) logfire.instrument_fastapi(app) @@ -96,7 +95,9 @@ def options_chat(): @app.post('/api/chat') async def get_chat(request: Request, database: Database = Depends(get_db)) -> Response: - return await starlette_chat.dispatch_request(request, deps=Deps(database, 123)) + return await 
VercelAIAdapter[Deps].dispatch_request( + chat_agent, request, deps=Deps(database, 123) + ) if __name__ == '__main__': diff --git a/pydantic_ai_slim/pydantic_ai/_agent_graph.py b/pydantic_ai_slim/pydantic_ai/_agent_graph.py index d7a54c5c71..fc883eee9b 100644 --- a/pydantic_ai_slim/pydantic_ai/_agent_graph.py +++ b/pydantic_ai_slim/pydantic_ai/_agent_graph.py @@ -243,6 +243,9 @@ async def run( # noqa: C901 if self.user_prompt is not None: parts.append(_messages.UserPromptPart(self.user_prompt)) + if not parts: + raise exceptions.UserError('No messages provided') + next_message = _messages.ModelRequest(parts=parts) next_message.instructions = await ctx.deps.get_instructions(run_context) diff --git a/pydantic_ai_slim/pydantic_ai/ag_ui.py b/pydantic_ai_slim/pydantic_ai/ag_ui.py index 0ae9211bbc..b4c32d8a12 100644 --- a/pydantic_ai_slim/pydantic_ai/ag_ui.py +++ b/pydantic_ai_slim/pydantic_ai/ag_ui.py @@ -6,19 +6,12 @@ from __future__ import annotations -import json -from collections.abc import AsyncIterator, Awaitable, Callable, Mapping, Sequence -from http import HTTPStatus -from typing import ( - Any, - Final, - Generic, - TypeAlias, -) +from collections.abc import AsyncIterator, Callable, Mapping, Sequence +from typing import Any, Generic -from pydantic import ValidationError - -from .agent import AbstractAgent, AgentRunResult +from . import DeferredToolResults +from .agent import AbstractAgent +from .messages import ModelMessage from .models import KnownModelName, Model from .output import OutputDataT, OutputSpec from .settings import ModelSettings @@ -27,11 +20,13 @@ from .usage import RunUsage, UsageLimits try: + from ag_ui.core.types import RunAgentInput + + from .ui import OnCompleteFunc, StateDeps, StateHandler from .ui.ag_ui import ( - StateDeps, - StateHandler, + SSE_CONTENT_TYPE, + AGUIAdapter, ) - from .ui.ag_ui.event_stream import RunAgentInput, _RunError # type: ignore[reportPrivateUsage] except ImportError as e: # pragma: no cover raise ImportError( 'Please install the `ag-ui-protocol` package to use `Agent.to_ag_ui()` method, ' @@ -42,7 +37,7 @@ from starlette.applications import Starlette from starlette.middleware import Middleware from starlette.requests import Request - from starlette.responses import Response, StreamingResponse + from starlette.responses import Response from starlette.routing import BaseRoute from starlette.types import ExceptionHandler, Lifespan except ImportError as e: # pragma: no cover @@ -62,14 +57,6 @@ 'run_ag_ui', ] -SSE_CONTENT_TYPE: Final[str] = 'text/event-stream' -"""Content type header value for Server-Sent Events (SSE).""" - -OnCompleteFunc: TypeAlias = Callable[[AgentRunResult[Any]], None] | Callable[[AgentRunResult[Any]], Awaitable[None]] -"""Callback function type that receives the `AgentRunResult` of the completed run. Can be sync or async.""" - -_BUILTIN_TOOL_CALL_ID_PREFIX: Final[str] = 'pyd_ai_builtin' - class AGUIApp(Generic[AgentDepsT, OutputDataT], Starlette): """ASGI application for running Pydantic AI agents with AG-UI protocol support.""" @@ -199,32 +186,18 @@ async def handle_ag_ui_request( Returns: A streaming Starlette response with AG-UI protocol events. 
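
    Example (a sketch; assumes a FastAPI `app` and an `Agent` instance named `agent`):
    ```python
    @app.post('/agent')
    async def run_agent(request: Request) -> Response:
        return await handle_ag_ui_request(agent, request)
    ```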
""" - accept = request.headers.get('accept', SSE_CONTENT_TYPE) - try: - input_data = RunAgentInput.model_validate(await request.json()) - except ValidationError as e: # pragma: no cover - return Response( - content=json.dumps(e.json()), - media_type='application/json', - status_code=HTTPStatus.UNPROCESSABLE_ENTITY, - ) - - return StreamingResponse( - run_ag_ui( - agent, - input_data, - accept, - output_type=output_type, - model=model, - deps=deps, - model_settings=model_settings, - usage_limits=usage_limits, - usage=usage, - infer_name=infer_name, - toolsets=toolsets, - on_complete=on_complete, - ), - media_type=accept, + return await AGUIAdapter.dispatch_request( + agent, + request, + deps=deps, + output_type=output_type, + model=model, + model_settings=model_settings, + usage_limits=usage_limits, + usage=usage, + infer_name=infer_name, + toolsets=toolsets, + on_complete=on_complete, ) @@ -234,6 +207,8 @@ async def run_ag_ui( accept: str = SSE_CONTENT_TYPE, *, output_type: OutputSpec[Any] | None = None, + message_history: Sequence[ModelMessage] | None = None, + deferred_tool_results: DeferredToolResults | None = None, model: Model | KnownModelName | str | None = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, @@ -265,32 +240,21 @@ async def run_ag_ui( Yields: Streaming event chunks encoded as strings according to the accept header value. """ - from .ui.ag_ui import AGUIAdapter - - adapter = AGUIAdapter(agent=agent) - async for event_str in adapter.run_stream_sse( - request=run_input, + adapter = AGUIAdapter(agent=agent, request=run_input) + async for event in adapter.encode_stream( + adapter.run_stream( + output_type=output_type, + message_history=message_history, + deferred_tool_results=deferred_tool_results, + model=model, + deps=deps, + model_settings=model_settings, + usage_limits=usage_limits, + usage=usage, + infer_name=infer_name, + toolsets=toolsets, + on_complete=on_complete, + ), accept=accept, - output_type=output_type, - model=model, - deps=deps, - model_settings=model_settings, - usage_limits=usage_limits, - usage=usage, - infer_name=infer_name, - toolsets=toolsets, - on_complete=on_complete, ): - yield event_str - - -# _ToolCallNotFoundError is defined here (not in ui/ag_ui) since it's specific to this module -class _ToolCallNotFoundError(_RunError, ValueError): - """Exception raised when an tool result is present without a matching call.""" - - def __init__(self, tool_call_id: str) -> None: - """Initialize the exception with the tool call ID.""" - super().__init__( # pragma: no cover - message=f'Tool call with ID {tool_call_id} not found in the history.', - code='tool_call_not_found', - ) + yield event diff --git a/pydantic_ai_slim/pydantic_ai/ui/__init__.py b/pydantic_ai_slim/pydantic_ai/ui/__init__.py index dec4830efe..44c2611879 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/__init__.py +++ b/pydantic_ai_slim/pydantic_ai/ui/__init__.py @@ -4,454 +4,15 @@ that transform Pydantic AI agent events into protocol-specific events (e.g., AG-UI, Vercel AI). 
""" -# pyright: reportIncompatibleMethodOverride=false, reportUnknownVariableType=false, reportGeneralTypeIssues=false - from __future__ import annotations -from abc import ABC, abstractmethod -from collections.abc import AsyncIterator -from typing import Any, Generic, Protocol, TypeVar, runtime_checkable -from uuid import uuid4 - -from ..messages import ( - AgentStreamEvent, - BuiltinToolCallEvent, # type: ignore[reportDeprecated] - BuiltinToolCallPart, - BuiltinToolResultEvent, # type: ignore[reportDeprecated] - BuiltinToolReturnPart, - FilePart, - FinalResultEvent, - FunctionToolCallEvent, - FunctionToolResultEvent, - ModelMessage, - PartDeltaEvent, - PartStartEvent, - TextPart, - TextPartDelta, - ThinkingPart, - ThinkingPartDelta, - ToolCallPart, - ToolCallPartDelta, -) -from ..run import AgentRunResultEvent -from ..tools import AgentDepsT +from .adapter import BaseAdapter, OnCompleteFunc, StateDeps, StateHandler +from .event_stream import BaseEventStream __all__ = [ - 'SSEEvent', - 'BaseEventStream', 'BaseAdapter', + 'BaseEventStream', + 'StateDeps', + 'StateHandler', + 'OnCompleteFunc', ] - -EventT = TypeVar('EventT', bound='SSEEvent') -"""Type variable for protocol-specific event types.""" - - -@runtime_checkable -class SSEEvent(Protocol): - """Protocol for events that can be encoded as Server-Sent Events (SSE).""" - - def sse(self) -> str: - """Encode event as a Server-Sent Event string. - - Returns: - The SSE-formatted string representation of the event. - """ - ... - - -class BaseEventStream(ABC, Generic[EventT, AgentDepsT]): - """Base class for transforming pAI agent events into protocol-specific events. - - This class provides a granular method-per-part-type pattern that makes it easy to customize - specific event transformations by overriding individual handler methods. - - Subclasses should: - 1. Initialize state attributes in `__init__` if needed - 2. Implement all abstract `handle_*` methods for event transformation - 3. Implement error handling methods - 4. Optionally override lifecycle hooks (`before_stream`, `after_stream`) - - Example: - ```python - class MyEventStream(BaseEventStream[MyEvent, MyDeps]): - def __init__(self): - super().__init__() - self.custom_state = {} - - async def handle_text_start(self, part: TextPart): - yield MyTextStartEvent(id=self.new_message_id(), text=part.content) - ``` - """ - - def __init__(self) -> None: - """Initialize event stream state. - - Subclasses can add additional state attributes for tracking streaming context. - """ - self.message_id: str = '' - - def new_message_id(self) -> str: - """Generate and store a new message ID. - - Returns: - A new UUID-based message ID. - """ - self.message_id = str(uuid4()) - return self.message_id - - async def agent_event_to_events(self, event: AgentStreamEvent | AgentRunResultEvent) -> AsyncIterator[EventT]: # noqa: C901 - """Transform a pAI agent event into protocol-specific events. - - This method dispatches to specific `handle_*` methods based on event and part type. - Subclasses should implement the individual handler methods rather than overriding this. - - Args: - event: The pAI agent event to transform. - - Yields: - Protocol-specific events. 
- """ - match event: - case PartStartEvent(part=part): - # Dispatch based on part type - match part: - case TextPart(): - async for e in self.handle_text_start(part): - yield e - case ThinkingPart(): - async for e in self.handle_thinking_start(part): - yield e - case ToolCallPart() | BuiltinToolCallPart(): - async for e in self.handle_tool_call_start(part): - yield e - case BuiltinToolReturnPart(): - async for e in self.handle_builtin_tool_return(part): - yield e - case FilePart(): - # FilePart is not currently handled by UI protocols - pass - case PartDeltaEvent(delta=delta): - # Dispatch based on delta type - match delta: - case TextPartDelta(): - async for e in self.handle_text_delta(delta): - yield e - case ThinkingPartDelta(): - async for e in self.handle_thinking_delta(delta): - yield e - case ToolCallPartDelta(): - async for e in self.handle_tool_call_delta(delta): - yield e - case FunctionToolCallEvent(): - async for e in self.handle_function_tool_call(event): - yield e - case FunctionToolResultEvent(): - async for e in self.handle_function_tool_result(event): - yield e - case BuiltinToolCallEvent(): # type: ignore[reportDeprecated] - async for e in self.handle_builtin_tool_call(event): - yield e - case BuiltinToolResultEvent(): # type: ignore[reportDeprecated] - async for e in self.handle_builtin_tool_result(event): - yield e - case FinalResultEvent(): - async for e in self.handle_final_result(event): - yield e - case AgentRunResultEvent(): - async for e in self.handle_run_result(event): - yield e - - # Granular part handlers (abstract - must implement) - - @abstractmethod - async def handle_text_start(self, part: TextPart) -> AsyncIterator[EventT]: - """Handle a TextPart at start. - - Args: - part: The TextPart. - - Yields: - Protocol-specific events. - """ - - @abstractmethod - async def handle_text_delta(self, delta: TextPartDelta) -> AsyncIterator[EventT]: - """Handle a TextPartDelta. - - Args: - delta: The TextPartDelta. - - Yields: - Protocol-specific events. - """ - - @abstractmethod - async def handle_thinking_start(self, part: ThinkingPart) -> AsyncIterator[EventT]: - """Handle a ThinkingPart at start. - - Args: - part: The ThinkingPart. - - Yields: - Protocol-specific events. - """ - - @abstractmethod - async def handle_thinking_delta(self, delta: ThinkingPartDelta) -> AsyncIterator[EventT]: - """Handle a ThinkingPartDelta. - - Args: - delta: The ThinkingPartDelta. - - Yields: - Protocol-specific events. - """ - - @abstractmethod - async def handle_tool_call_start(self, part: ToolCallPart | BuiltinToolCallPart) -> AsyncIterator[EventT]: - """Handle a ToolCallPart or BuiltinToolCallPart at start. - - Args: - part: The tool call part. - - Yields: - Protocol-specific events. - """ - - @abstractmethod - async def handle_tool_call_delta(self, delta: ToolCallPartDelta) -> AsyncIterator[EventT]: - """Handle a ToolCallPartDelta. - - Args: - delta: The ToolCallPartDelta. - - Yields: - Protocol-specific events. - """ - - @abstractmethod - async def handle_builtin_tool_return(self, part: BuiltinToolReturnPart) -> AsyncIterator[EventT]: - """Handle a BuiltinToolReturnPart. - - Args: - part: The BuiltinToolReturnPart. - - Yields: - Protocol-specific events. - """ - - # Tool event handlers (abstract - must implement) - - @abstractmethod - async def handle_function_tool_call(self, event: FunctionToolCallEvent) -> AsyncIterator[EventT]: - """Handle a FunctionToolCallEvent. - - Args: - event: The function tool call event. - - Yields: - Protocol-specific events. 
- """ - - @abstractmethod - async def handle_function_tool_result(self, event: FunctionToolResultEvent) -> AsyncIterator[EventT]: - """Handle a FunctionToolResultEvent. - - Args: - event: The function tool result event. - - Yields: - Protocol-specific events. - """ - - @abstractmethod - async def handle_builtin_tool_call(self, event: BuiltinToolCallEvent) -> AsyncIterator[EventT]: # type: ignore[reportDeprecated] - """Handle a BuiltinToolCallEvent. - - Args: - event: The builtin tool call event. - - Yields: - Protocol-specific events. - """ - - @abstractmethod - async def handle_builtin_tool_result(self, event: BuiltinToolResultEvent) -> AsyncIterator[EventT]: # type: ignore[reportDeprecated] - """Handle a BuiltinToolResultEvent. - - Args: - event: The builtin tool result event. - - Yields: - Protocol-specific events. - """ - - @abstractmethod - async def handle_final_result(self, event: FinalResultEvent) -> AsyncIterator[EventT]: - """Handle a FinalResultEvent. - - Args: - event: The final result event. - - Yields: - Protocol-specific events. - """ - - @abstractmethod - async def handle_run_result(self, event: AgentRunResultEvent) -> AsyncIterator[EventT]: - """Handle an AgentRunResultEvent (final event with result/usage). - - Args: - event: The agent run result event. - - Yields: - Protocol-specific events. - """ - - # Lifecycle hooks (optional overrides) - - async def before_stream(self) -> AsyncIterator[EventT]: - """Yield events before agent streaming starts. - - This hook is called before any agent events are processed. - Override this to inject custom events at the start of the stream. - - Yields: - Protocol-specific events to emit before streaming. - """ - return - yield # Make this an async generator - - async def after_stream(self) -> AsyncIterator[EventT]: - """Yield events after agent streaming completes. - - This hook is called after all agent events have been processed. - Override this to inject custom events at the end of the stream. - - Yields: - Protocol-specific events to emit after streaming. - """ - return - yield # Make this an async generator - - # Error handling (must implement) - - @abstractmethod - async def on_validation_error(self, error: Exception) -> AsyncIterator[EventT]: - """Handle validation errors that occur before streaming starts. - - Args: - error: The validation error that occurred. - - Yields: - Protocol-specific error events. - """ - - @abstractmethod - async def on_stream_error(self, error: Exception) -> AsyncIterator[EventT]: - """Handle errors that occur during streaming (after stream has started). - - Args: - error: The error that occurred during streaming. - - Yields: - Protocol-specific error events. - """ - - -RequestT = TypeVar('RequestT') -"""Type variable for protocol-specific request types.""" - -MessageT = TypeVar('MessageT') -"""Type variable for protocol-specific message types.""" - - -class BaseAdapter(ABC, Generic[RequestT, MessageT, EventT, AgentDepsT]): - """Base adapter for handling UI protocol requests and streaming responses. - - This class provides a unified interface for request/response handling across different - UI protocols (AG-UI, Vercel AI, etc.). 
It handles: - - Request parsing and validation - - Message format conversion (protocol messages → pAI messages) - - Agent execution and event streaming - - Error handling (validation errors vs streaming errors) - - SSE encoding - - Type Parameters: - RequestT: Protocol-specific request type (e.g., RunAgentInput, RequestData) - MessageT: Protocol-specific message type (e.g., ag_ui.Message, UIMessage) - EventT: Protocol-specific event type (e.g., ag_ui.BaseEvent, AbstractSSEChunk) - AgentDepsT: Agent dependencies type - - Example: - ```python - class MyAdapter(BaseAdapter[MyRequest, MyMessage, MyEvent, MyDeps]): - def create_event_stream(self) -> BaseEventStream[MyEvent, MyDeps]: - return MyEventStream() - - def parse_request_messages(self, request: MyRequest) -> list[MyMessage]: - return request.messages - - def protocol_messages_to_pai_messages(self, messages: list[MyMessage]) -> list[ModelMessage]: - # Convert protocol messages to pAI messages - ... - ``` - """ - - @abstractmethod - def create_event_stream(self) -> BaseEventStream[EventT, AgentDepsT]: - """Create a new event stream for this protocol. - - Returns: - A protocol-specific event stream instance. - """ - - @abstractmethod - def parse_request_messages(self, request: RequestT) -> list[MessageT]: - """Extract messages from the protocol request. - - Args: - request: The protocol-specific request. - - Returns: - List of protocol-specific messages. - """ - - @abstractmethod - def protocol_messages_to_pai_messages(self, messages: list[MessageT]) -> list[ModelMessage]: - """Convert protocol messages to Pydantic AI messages. - - Args: - messages: List of protocol-specific messages. - - Returns: - List of Pydantic AI ModelMessage objects. - """ - - @abstractmethod - def encode_event(self, event: EventT) -> str: - """Encode a protocol event as an SSE string. - - Args: - event: The protocol-specific event. - - Returns: - SSE-formatted string. - """ - - @abstractmethod - async def dispatch_request(self, request: Any, deps: AgentDepsT | None = None) -> Any: - """Handle a request and return a response. - - This method should handle the full request/response cycle: - - Parse and validate the request - - Run the agent with the request data - - Return an appropriate response (e.g., StreamingResponse, EventSourceResponse) - - Args: - request: The protocol-specific request object (e.g., Starlette Request). - deps: Optional dependencies to pass to the agent. - - Returns: - A protocol-specific response object. - """ diff --git a/pydantic_ai_slim/pydantic_ai/ui/adapter.py b/pydantic_ai_slim/pydantic_ai/ui/adapter.py new file mode 100644 index 0000000000..00b8afa311 --- /dev/null +++ b/pydantic_ai_slim/pydantic_ai/ui/adapter.py @@ -0,0 +1,370 @@ +"""Base classes for UI event stream protocols. + +This module provides abstract base classes for implementing UI event stream adapters +that transform Pydantic AI agent events into protocol-specific events (e.g., AG-UI, Vercel AI). +""" + +from __future__ import annotations + +from abc import ABC, abstractmethod +from collections.abc import AsyncIterator, Awaitable, Callable, Sequence +from dataclasses import Field, dataclass, replace +from functools import cached_property +from http import HTTPStatus +from typing import ( + Any, + ClassVar, + Generic, + Protocol, + TypeAlias, + TypeVar, + runtime_checkable, +) + +from pydantic import BaseModel, ValidationError +from starlette.responses import StreamingResponse + +from .. 
import DeferredToolRequests, DeferredToolResults, _utils
+from ..agent import AbstractAgent, AgentDepsT, AgentRunResult
+from ..exceptions import UserError
+from ..messages import ModelMessage
+from ..models import KnownModelName, Model
+from ..output import OutputSpec
+from ..settings import ModelSettings
+from ..toolsets import AbstractToolset
+from ..usage import RunUsage, UsageLimits
+from .event_stream import BaseEventStream, SourceEvent
+
+try:
+    from starlette.requests import Request
+    from starlette.responses import Response
+except ImportError as e:  # pragma: no cover
+    raise ImportError(
+        'Please install the `starlette` package to use the UI adapters, '
+        'you can use the `ag-ui` optional group — `pip install "pydantic-ai-slim[ag-ui]"`'
+    ) from e
+
+
+__all__ = [
+    'BaseAdapter',
+    'OnCompleteFunc',
+    'StateDeps',
+    'StateHandler',
+]
+
+EventT = TypeVar('EventT')
+"""Type variable for protocol-specific event types."""
+
+
+RequestT = TypeVar('RequestT')
+"""Type variable for protocol-specific request types."""
+
+MessageT = TypeVar('MessageT')
+"""Type variable for protocol-specific message types."""
+
+OnCompleteFunc: TypeAlias = Callable[[AgentRunResult[Any]], None] | Callable[[AgentRunResult[Any]], Awaitable[None]]
+"""Callback function type that receives the `AgentRunResult` of the completed run. Can be sync or async."""
+
+
+# State management types
+
+StateT = TypeVar('StateT', bound=BaseModel)
+"""Type variable for the state type, which must be a subclass of `BaseModel`."""
+
+
+@runtime_checkable
+class StateHandler(Protocol):
+    """Protocol for state handlers in agent runs. Requires the class to be a dataclass with a `state` field."""
+
+    # Has to be a dataclass so we can use `replace` to update the state.
+    # From https://github.com/python/typeshed/blob/9ab7fde0a0cd24ed7a72837fcb21093b811b80d8/stdlib/_typeshed/__init__.pyi#L352
+    __dataclass_fields__: ClassVar[dict[str, Field[Any]]]
+
+    @property
+    def state(self) -> Any:
+        """Get the current state of the agent run."""
+        ...
+
+    @state.setter
+    def state(self, state: Any) -> None:
+        """Set the state of the agent run.
+
+        This method is called to update the state of the agent run with the
+        provided state.
+
+        Args:
+            state: The run state.
+
+        Raises:
+            InvalidStateError: If `state` does not match the expected model.
+        """
+        ...
+
+
+@dataclass
+class StateDeps(Generic[StateT]):
+    """Provides AG-UI state management.
+
+    This class is used to manage the state of an agent run. It allows setting
+    the state of the agent run with a specific type of state model, which must
+    be a subclass of `BaseModel`.
+
+    The state is set using the `state` setter by the `Adapter` when the run starts.
+
+    Implements the `StateHandler` protocol.
+    """
+
+    state: StateT
+
+
+@dataclass
+class BaseAdapter(ABC, Generic[RequestT, MessageT, EventT, AgentDepsT]):
+    """Base adapter for handling UI protocol requests and streaming responses.
+
+    This class provides a unified interface for request/response handling across different
+    UI protocols (AG-UI, Vercel AI, etc.).
It handles:
+    - Request parsing and validation
+    - Message format conversion (protocol messages → pAI messages)
+    - Agent execution and event streaming
+    - Error handling (validation errors vs streaming errors)
+    - SSE encoding
+
+    Type Parameters:
+        RequestT: Protocol-specific request type (e.g., RunAgentInput, RequestData)
+        MessageT: Protocol-specific message type (e.g., ag_ui.Message, UIMessage)
+        EventT: Protocol-specific event type (e.g., ag_ui.BaseEvent, BaseChunk)
+        AgentDepsT: Agent dependencies type
+
+    Example:
+        ```python
+        class MyAdapter(BaseAdapter[MyRequest, MyMessage, MyEvent, MyDeps]):
+            @cached_property
+            def messages(self) -> list[ModelMessage]:
+                # Convert self.request's protocol messages to pAI messages
+                ...
+        ```
+    """
+
+    agent: AbstractAgent[AgentDepsT]
+    """The Pydantic AI agent to run."""
+
+    request: RequestT
+    """The protocol-specific request object."""
+
+    @abstractmethod
+    def create_event_stream(self) -> BaseEventStream[RequestT, EventT, AgentDepsT]:
+        """Create an event stream for the adapter."""
+
+    @cached_property
+    @abstractmethod
+    def messages(self) -> list[ModelMessage]:
+        """Convert the request's protocol messages to Pydantic AI messages.
+
+        Returns:
+            List of Pydantic AI ModelMessage objects.
+        """
+
+    @cached_property
+    def toolset(self) -> AbstractToolset[AgentDepsT] | None:
+        """Get the toolset carried by the request (e.g. frontend tools), if the protocol supports one."""
+        return None
+
+    @cached_property
+    def raw_state(self) -> dict[str, Any]:
+        """Get the raw state carried by the request, if the protocol supports state."""
+        return {}
+
+    def deps_with_state(self, deps: AgentDepsT) -> AgentDepsT:
+        """Return `deps` with the request's state injected, if `deps` implements the `StateHandler` protocol."""
+        raw_state: dict[str, Any] = self.raw_state
+        if isinstance(deps, StateHandler):
+            if isinstance(deps.state, BaseModel):
+                state = type(deps.state).model_validate(raw_state)
+            else:
+                state = raw_state
+
+            return replace(deps, state=state)
+        elif raw_state:
+            raise UserError(
+                f'AG-UI state is provided but `deps` of type `{type(deps).__name__}` does not implement the `StateHandler` protocol: it needs to be a dataclass with a non-optional `state` field.'
+            )
+
+        return deps
+
+    @abstractmethod
+    def encode_event(self, event: EventT, accept: str | None = None) -> str:
+        """Encode a protocol event as an SSE string.
+
+        Args:
+            event: The protocol-specific event.
+            accept: The accept header value for encoding format.
+
+        Returns:
+            The encoded string.
+        """
+
+    async def encode_stream(self, stream: AsyncIterator[EventT], accept: str | None = None) -> AsyncIterator[str]:
+        """Encode a stream of events as SSE strings.
+
+        Args:
+            stream: The stream of events to encode.
+            accept: The accept header value for encoding format.
+        """
+        async for event in stream:
+            yield self.encode_event(event, accept)
+
+    @classmethod
+    async def validate_request(cls, request: Request) -> RequestT:
+        """Parse and validate the protocol request data from an incoming Starlette request."""
+        raise NotImplementedError('validate_request is not implemented')
+
+    async def process_stream(
+        self,
+        stream: AsyncIterator[SourceEvent],
+        on_complete: OnCompleteFunc | None = None,
+    ) -> AsyncIterator[EventT]:
+        """Transform a stream of agent events into protocol-specific events and run the completion callback.
+
+        Args:
+            stream: The stream of agent events to process.
+            on_complete: Optional callback function called when the agent run completes successfully.
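+
+        Example (a sketch; `MyAdapter`, `my_request` and `deps` are assumed to exist):
+            ```python
+            adapter = MyAdapter(agent=agent, request=my_request)
+            events = agent.run_stream_events(user_prompt=None, deps=deps)
+            async for protocol_event in adapter.process_stream(events):
+                ...  # protocol-specific events, ready for encode_stream()
+            ```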
+        """
+        event_stream = self.create_event_stream()
+        async for event in event_stream.handle_stream(stream):
+            yield event
+
+        try:
+            result = event_stream.result
+            if on_complete is not None and result is not None:
+                if _utils.is_async_callable(on_complete):
+                    await on_complete(result)
+                else:
+                    await _utils.run_in_executor(on_complete, result)
+        except Exception as e:
+            async for event in event_stream.on_error(e):
+                yield event
+
+    async def run_stream(
+        self,
+        *,
+        output_type: OutputSpec[Any] | None = None,
+        message_history: Sequence[ModelMessage] | None = None,
+        deferred_tool_results: DeferredToolResults | None = None,
+        model: Model | KnownModelName | str | None = None,
+        deps: AgentDepsT = None,
+        model_settings: ModelSettings | None = None,
+        usage_limits: UsageLimits | None = None,
+        usage: RunUsage | None = None,
+        infer_name: bool = True,
+        toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None,
+        on_complete: OnCompleteFunc | None = None,
+    ) -> AsyncIterator[EventT]:
+        """Run the agent with the protocol request and stream protocol-specific events.
+
+        Args:
+            output_type: Custom output type to use for this run, `output_type` may only be used if the agent has no
+                output validators since output validators would expect an argument that matches the agent's output type.
+            message_history: History of the conversation so far, prepended to the messages from the request.
+            deferred_tool_results: Optional results for deferred tool calls in the message history.
+            model: Optional model to use for this run, required if `model` was not set when creating the agent.
+            deps: Optional dependencies to use for this run.
+            model_settings: Optional settings to use for this model's request.
+            usage_limits: Optional limits on model request count or token usage.
+            usage: Optional usage to start with, useful for resuming a conversation or agents used in tools.
+            infer_name: Whether to try to infer the agent name from the call frame if it's not set.
+            toolsets: Optional additional toolsets for this run.
+            on_complete: Optional callback function called when the agent run completes successfully.
+                The callback receives the completed [`AgentRunResult`][pydantic_ai.agent.AgentRunResult] and can access `all_messages()` and other result data.
+
+        Yields:
+            Protocol-specific events, not yet encoded; use `encode_stream` to produce SSE strings.
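+
+        Example (a sketch; a concrete `MyAdapter` subclass and a `run_input` request are assumed):
+            ```python
+            adapter = MyAdapter(agent=agent, request=run_input)
+            async for event in adapter.run_stream(deps=deps):
+                ...  # protocol-specific events
+            ```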
+        """
+        message_history = [*(message_history or []), *self.messages]
+
+        toolset = self.toolset
+        if toolset:
+            output_type = [output_type or self.agent.output_type, DeferredToolRequests]
+            toolsets = [*toolsets, toolset] if toolsets else [toolset]
+
+        deps = self.deps_with_state(deps)
+
+        async for event in self.process_stream(
+            self.agent.run_stream_events(
+                user_prompt=None,
+                output_type=output_type,
+                message_history=message_history,
+                deferred_tool_results=deferred_tool_results,
+                model=model,
+                deps=deps,
+                model_settings=model_settings,
+                usage_limits=usage_limits,
+                usage=usage,
+                infer_name=infer_name,
+                toolsets=toolsets,
+            ),
+            on_complete=on_complete,
+        ):
+            yield event
+
+    @classmethod
+    async def dispatch_request(
+        cls,
+        agent: AbstractAgent[AgentDepsT, Any],
+        request: Request,
+        *,
+        message_history: Sequence[ModelMessage] | None = None,
+        deferred_tool_results: DeferredToolResults | None = None,
+        model: Model | KnownModelName | str | None = None,
+        deps: AgentDepsT = None,
+        output_type: OutputSpec[Any] | None = None,
+        model_settings: ModelSettings | None = None,
+        usage_limits: UsageLimits | None = None,
+        usage: RunUsage | None = None,
+        infer_name: bool = True,
+        toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None,
+        on_complete: OnCompleteFunc | None = None,
+    ) -> Response:
+        """Handle a UI protocol request and return a streaming response.
+
+        Args:
+            agent: The agent to run.
+            request: The incoming Starlette/FastAPI request.
+            message_history: History of the conversation so far, prepended to the messages from the request.
+            deferred_tool_results: Optional results for deferred tool calls in the message history.
+            deps: Optional dependencies to pass to the agent.
+            output_type: Custom output type for this run.
+            model: Optional model to use for this run.
+            model_settings: Optional settings for the model's request.
+            usage_limits: Optional limits on model request count or token usage.
+            usage: Optional usage to start with.
+            infer_name: Whether to infer the agent name from the call frame.
+            toolsets: Optional additional toolsets for this run.
+            on_complete: Optional callback called when the agent run completes.
+
+        Returns:
+            A streaming Starlette response with protocol-specific events.
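+
+        Example (a sketch assuming a FastAPI `app` and a concrete `MyAdapter` subclass):
+            ```python
+            @app.post('/chat')
+            async def chat(request: Request) -> Response:
+                return await MyAdapter.dispatch_request(agent, request)
+            ```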
+        """
+        try:
+            request_data = await cls.validate_request(request)
+        except ValidationError as e:  # pragma: no cover
+            return Response(
+                content=e.json(),
+                media_type='application/json',
+                status_code=HTTPStatus.UNPROCESSABLE_ENTITY,
+            )
+
+        adapter = cls(agent=agent, request=request_data)
+
+        accept = request.headers.get('accept')
+        return StreamingResponse(
+            adapter.encode_stream(
+                adapter.run_stream(
+                    message_history=message_history,
+                    deferred_tool_results=deferred_tool_results,
+                    deps=deps,
+                    output_type=output_type,
+                    model=model,
+                    model_settings=model_settings,
+                    usage_limits=usage_limits,
+                    usage=usage,
+                    infer_name=infer_name,
+                    toolsets=toolsets,
+                    on_complete=on_complete,
+                ),
+                accept=accept,
+            ),
+            # Mirror the negotiated content type on the response (e.g. `text/event-stream`).
+            media_type=accept,
+        )
diff --git a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/__init__.py b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/__init__.py
index 620b74cd28..53784efb51 100644
--- a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/__init__.py
+++ b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/__init__.py
@@ -1,12 +1,10 @@
 """AG-UI protocol integration for Pydantic AI agents."""
 
-from .adapter import AGUIAdapter
-from .event_stream import AGUIEventStream, StateDeps, StateHandler, protocol_messages_to_pai_messages
+from ._adapter import SSE_CONTENT_TYPE, AGUIAdapter
+from ._event_stream import AGUIEventStream
 
 __all__ = [
     'AGUIAdapter',
     'AGUIEventStream',
-    'StateHandler',
-    'StateDeps',
-    'protocol_messages_to_pai_messages',
+    'SSE_CONTENT_TYPE',
 ]
diff --git a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py
new file mode 100644
index 0000000000..0681caa2c4
--- /dev/null
+++ b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py
@@ -0,0 +1,248 @@
+"""AG-UI adapter for handling requests."""
+
+from __future__ import annotations
+
+from functools import cached_property
+from typing import Any, Final
+
+from ... import ExternalToolset, ToolDefinition
+from ...agent import AgentDepsT
+from ...messages import (
+    BuiltinToolCallPart,
+    BuiltinToolReturnPart,
+    ModelMessage,
+    ModelRequest,
+    ModelRequestPart,
+    ModelResponse,
+    ModelResponsePart,
+    SystemPromptPart,
+    TextPart,
+    ToolCallPart,
+    ToolReturnPart,
+    UserPromptPart,
+)
+from ...toolsets import AbstractToolset
+
+try:
+    from ag_ui.core import (
+        AssistantMessage,
+        BaseEvent,
+        DeveloperMessage,
+        Message,
+        RunAgentInput,
+        SystemMessage,
+        Tool as AGUITool,
+        ToolMessage,
+        UserMessage,
+    )
+    from ag_ui.encoder import EventEncoder
+
+    from ..adapter import BaseAdapter
+    from ..event_stream import BaseEventStream
+    from ._event_stream import BUILTIN_TOOL_CALL_ID_PREFIX, AGUIEventStream
+except ImportError as e:  # pragma: no cover
+    raise ImportError(
+        'Please install the `ag-ui-protocol` package to use AG-UI integration, '
+        'you can use the `ag-ui` optional group — `pip install "pydantic-ai-slim[ag-ui]"`'
+    ) from e
+
+try:
+    from starlette.requests import Request
+except ImportError as e:  # pragma: no cover
+    raise ImportError(
+        'Please install the `starlette` package to use the `Agent.to_ag_ui()` method, '
+        'you can use the `ag-ui` optional group — `pip install "pydantic-ai-slim[ag-ui]"`'
+    ) from e
+
+__all__ = ['AGUIAdapter']
+
+
+SSE_CONTENT_TYPE: Final[str] = 'text/event-stream'
+"""Content type header value for Server-Sent Events (SSE)."""
+
+# Frontend toolset
+
+
+class _AGUIFrontendToolset(ExternalToolset[AgentDepsT]):
+    """Toolset for AG-UI frontend tools."""
+
+    def __init__(self, tools: list[AGUITool]):
+        """Initialize the toolset with AG-UI tools.
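+
+        Each AG-UI tool definition is mapped one-to-one to a Pydantic AI
+        `ToolDefinition` on an `ExternalToolset`, so the model can call tools
+        that live in the frontend while their execution stays on the client.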
+
+        Args:
+            tools: List of AG-UI tool definitions.
+        """
+        super().__init__(
+            [
+                ToolDefinition(
+                    name=tool.name,
+                    description=tool.description,
+                    parameters_json_schema=tool.parameters,
+                )
+                for tool in tools
+            ]
+        )
+
+    @property
+    def label(self) -> str:
+        """Return the label for this toolset."""
+        return 'the AG-UI frontend tools'  # pragma: no cover
+
+
+class AGUIAdapter(BaseAdapter[RunAgentInput, Message, BaseEvent, AgentDepsT]):
+    """Adapter for handling AG-UI protocol requests with Pydantic AI agents.
+
+    This adapter provides an interface for integrating Pydantic AI agents
+    with the AG-UI protocol, handling request parsing, message conversion,
+    and event streaming.
+
+    Example:
+        ```python
+        from pydantic_ai import Agent
+        from pydantic_ai.ui.ag_ui import AGUIAdapter
+
+        agent = Agent('openai:gpt-4')
+
+        async def handle_request(run_input: RunAgentInput, deps=None):
+            adapter = AGUIAdapter(agent=agent, request=run_input)
+            async for event_str in adapter.encode_stream(adapter.run_stream(deps=deps)):
+                yield event_str
+        ```
+    """
+
+    def create_event_stream(self) -> BaseEventStream[RunAgentInput, BaseEvent, AgentDepsT]:
+        """Create an event stream for the adapter."""
+        return AGUIEventStream(self.request)
+
+    @classmethod
+    async def validate_request(cls, request: Request) -> RunAgentInput:
+        """Parse and validate the AG-UI run input from the incoming request."""
+        return RunAgentInput.model_validate(await request.json())
+
+    @cached_property
+    def toolset(self) -> AbstractToolset[AgentDepsT] | None:
+        """Get the toolset for the adapter."""
+        if self.request.tools:
+            return _AGUIFrontendToolset[AgentDepsT](self.request.tools)
+        return None
+
+    @cached_property
+    def raw_state(self) -> dict[str, Any]:
+        """Get the state of the agent run."""
+        return self.request.state or {}
+
+    def encode_event(self, event: BaseEvent, accept: str | None = None) -> str:
+        """Encode an AG-UI event as SSE.
+
+        Args:
+            event: The AG-UI event to encode.
+            accept: The accept header value for encoding format.
+
+        Returns:
+            The SSE-formatted string.
+        """
+        encoder = EventEncoder(accept=accept or SSE_CONTENT_TYPE)
+        return encoder.encode(event)
+
+    @cached_property
+    def messages(self) -> list[ModelMessage]:
+        """Convert the request's AG-UI messages to Pydantic AI messages.
+
+        Returns:
+            List of Pydantic AI ModelMessage objects.
+        """
+        result: list[ModelMessage] = []
+        tool_calls: dict[str, str] = {}  # Tool call ID to tool name mapping.
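+        # Group consecutive user/system/tool messages into a single ModelRequest
+        # and consecutive assistant/builtin-tool messages into a single
+        # ModelResponse, mirroring the shape Pydantic AI expects. Builtin tool
+        # results are recognised by the BUILTIN_TOOL_CALL_ID_PREFIX on their
+        # tool_call_id and attached to the response side.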
+ request_parts: list[ModelRequestPart] | None = None + response_parts: list[ModelResponsePart] | None = None + + for msg in self.request.messages: + if isinstance(msg, UserMessage | SystemMessage | DeveloperMessage) or ( + isinstance(msg, ToolMessage) and not msg.tool_call_id.startswith(BUILTIN_TOOL_CALL_ID_PREFIX) + ): + if request_parts is None: + request_parts = [] + result.append(ModelRequest(parts=request_parts)) + response_parts = None + + if isinstance(msg, UserMessage): + request_parts.append(UserPromptPart(content=msg.content)) + elif isinstance(msg, SystemMessage | DeveloperMessage): + request_parts.append(SystemPromptPart(content=msg.content)) + else: + tool_call_id = msg.tool_call_id + tool_name = tool_calls.get(tool_call_id) + if tool_name is None: # pragma: no cover + raise ValueError(f'Tool call with ID {tool_call_id} not found in the history.') + + request_parts.append( + ToolReturnPart( + tool_name=tool_name, + content=msg.content, + tool_call_id=tool_call_id, + ) + ) + + elif isinstance(msg, AssistantMessage) or ( # pragma: no branch + isinstance(msg, ToolMessage) and msg.tool_call_id.startswith(BUILTIN_TOOL_CALL_ID_PREFIX) + ): + if response_parts is None: + response_parts = [] + result.append(ModelResponse(parts=response_parts)) + request_parts = None + + if isinstance(msg, AssistantMessage): + if msg.content: + response_parts.append(TextPart(content=msg.content)) + + if msg.tool_calls: + for tool_call in msg.tool_calls: + tool_call_id = tool_call.id + tool_name = tool_call.function.name + tool_calls[tool_call_id] = tool_name + + if tool_call_id.startswith(BUILTIN_TOOL_CALL_ID_PREFIX): + _, provider_name, tool_call_id = tool_call_id.split('|', 2) + response_parts.append( + BuiltinToolCallPart( + tool_name=tool_name, + args=tool_call.function.arguments, + tool_call_id=tool_call_id, + provider_name=provider_name, + ) + ) + else: + response_parts.append( + ToolCallPart( + tool_name=tool_name, + tool_call_id=tool_call_id, + args=tool_call.function.arguments, + ) + ) + else: + tool_call_id = msg.tool_call_id + tool_name = tool_calls.get(tool_call_id) + if tool_name is None: # pragma: no cover + raise ValueError(f'Tool call with ID {tool_call_id} not found in the history.') + _, provider_name, tool_call_id = tool_call_id.split('|', 2) + + response_parts.append( + BuiltinToolReturnPart( + tool_name=tool_name, + content=msg.content, + tool_call_id=tool_call_id, + provider_name=provider_name, + ) + ) + + return result diff --git a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_event_stream.py b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_event_stream.py new file mode 100644 index 0000000000..e472bc204f --- /dev/null +++ b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_event_stream.py @@ -0,0 +1,274 @@ +"""AG-UI protocol adapter for Pydantic AI agents. + +This module provides classes for integrating Pydantic AI agents with the AG-UI protocol, +enabling streaming event-based communication for interactive AI applications. +""" + +from __future__ import annotations + +import json +from collections.abc import AsyncIterator, Iterable +from typing import TYPE_CHECKING, Final + +from ...messages import ( + BuiltinToolCallPart, + BuiltinToolReturnPart, + FinalResultEvent, + FunctionToolCallEvent, + FunctionToolResultEvent, + TextPart, + TextPartDelta, + ThinkingPart, + ThinkingPartDelta, + ToolCallPart, + ToolCallPartDelta, + ToolReturnPart, +) +from ...tools import AgentDepsT +from .. 
import BaseEventStream

+
+if TYPE_CHECKING:
+    pass  # Agent type is not actually used in this module
+
+try:
+    from ag_ui.core import (
+        BaseEvent,
+        EventType,
+        RunAgentInput,
+        RunErrorEvent,
+        RunFinishedEvent,
+        RunStartedEvent,
+        TextMessageContentEvent,
+        TextMessageEndEvent,
+        TextMessageStartEvent,
+        ThinkingEndEvent,
+        ThinkingStartEvent,
+        ThinkingTextMessageContentEvent,
+        ThinkingTextMessageEndEvent,
+        ThinkingTextMessageStartEvent,
+        ToolCallArgsEvent,
+        ToolCallEndEvent,
+        ToolCallResultEvent,
+        ToolCallStartEvent,
+    )
+except ImportError as e:  # pragma: no cover
+    raise ImportError(
+        'Please install the `ag-ui-protocol` package to use AG-UI integration, '
+        'you can use the `ag-ui` optional group — `pip install "pydantic-ai-slim[ag-ui]"`'
+    ) from e
+
+__all__ = [
+    'AGUIEventStream',
+    'RunAgentInput',
+    'RunStartedEvent',
+    'RunFinishedEvent',
+]
+
+BUILTIN_TOOL_CALL_ID_PREFIX: Final[str] = 'pyd_ai_builtin'
+
+
+class AGUIEventStream(BaseEventStream[RunAgentInput, BaseEvent, AgentDepsT]):
+    """Transforms Pydantic AI agent events into AG-UI protocol events.
+
+    This class handles the stateful transformation of streaming agent events
+    into the AG-UI protocol format, managing message IDs, thinking mode state,
+    and tool call ID mappings for builtin tools.
+
+    Example:
+        ```python
+        event_stream = AGUIEventStream(run_input)
+        async for ag_ui_event in event_stream.handle_stream(pai_events):
+            print(ag_ui_event)
+        ```
+    """
+
+    def __init__(self, request: RunAgentInput) -> None:
+        """Initialize AG-UI event stream state."""
+        super().__init__(request)
+        self.part_end: BaseEvent | None = None
+        self.thinking: bool = False
+        self.builtin_tool_call_ids: dict[str, str] = {}
+
+    async def before_stream(self) -> AsyncIterator[BaseEvent]:
+        """Yield events before agent streaming starts."""
+        yield RunStartedEvent(
+            thread_id=self.request.thread_id,
+            run_id=self.request.run_id,
+        )
+
+    async def after_stream(self) -> AsyncIterator[BaseEvent]:
+        """Yield events after agent streaming completes, closing any pending state."""
+        # Emit any pending part end event
+        if self.part_end:  # pragma: no branch
+            yield self.part_end
+            self.part_end = None
+
+        # End thinking mode if still active
+        if self.thinking:
+            yield ThinkingEndEvent(
+                type=EventType.THINKING_END,
+            )
+            self.thinking = False
+
+        # Emit finish event
+        yield RunFinishedEvent(
+            thread_id=self.request.thread_id,
+            run_id=self.request.run_id,
+        )
+
+    async def on_error(self, error: Exception) -> AsyncIterator[BaseEvent]:
+        """Handle errors during streaming."""
+        # Try to get code from exception if it has one, otherwise use class name
+        code = getattr(error, 'code', error.__class__.__name__)
+        yield RunErrorEvent(message=str(error), code=code)
+
+    # Granular handlers implementation
+
+    async def handle_text_start(self, part: TextPart) -> AsyncIterator[BaseEvent]:
+        """Handle a TextPart at start."""
+        if self.part_end:
+            yield self.part_end
+            self.part_end = None
+
+        if self.thinking:
+            yield ThinkingEndEvent(type=EventType.THINKING_END)
+            self.thinking = False
+
+        message_id = self.new_message_id()
+        yield TextMessageStartEvent(message_id=message_id)
+        if part.content:  # pragma: no branch
+            yield TextMessageContentEvent(message_id=message_id, delta=part.content)
+        self.part_end = TextMessageEndEvent(message_id=message_id)
+
+    async def handle_text_delta(self, delta: TextPartDelta) -> AsyncIterator[BaseEvent]:
+        """Handle a TextPartDelta."""
+        if delta.content_delta:  # pragma: no branch
+            yield TextMessageContentEvent(message_id=self.message_id,
delta=delta.content_delta) + + async def handle_thinking_start(self, part: ThinkingPart) -> AsyncIterator[BaseEvent]: + """Handle a ThinkingPart at start.""" + if self.part_end: + yield self.part_end + self.part_end = None + + if not self.thinking: + yield ThinkingStartEvent(type=EventType.THINKING_START) + self.thinking = True + + if part.content: + yield ThinkingTextMessageStartEvent(type=EventType.THINKING_TEXT_MESSAGE_START) + yield ThinkingTextMessageContentEvent(type=EventType.THINKING_TEXT_MESSAGE_CONTENT, delta=part.content) + self.part_end = ThinkingTextMessageEndEvent(type=EventType.THINKING_TEXT_MESSAGE_END) + + async def handle_thinking_delta(self, delta: ThinkingPartDelta) -> AsyncIterator[BaseEvent]: + """Handle a ThinkingPartDelta.""" + if delta.content_delta: # pragma: no branch + if not isinstance(self.part_end, ThinkingTextMessageEndEvent): + yield ThinkingTextMessageStartEvent(type=EventType.THINKING_TEXT_MESSAGE_START) + self.part_end = ThinkingTextMessageEndEvent(type=EventType.THINKING_TEXT_MESSAGE_END) + + yield ThinkingTextMessageContentEvent( + type=EventType.THINKING_TEXT_MESSAGE_CONTENT, delta=delta.content_delta + ) + + async def handle_tool_call_start(self, part: ToolCallPart | BuiltinToolCallPart) -> AsyncIterator[BaseEvent]: + """Handle a ToolCallPart or BuiltinToolCallPart at start.""" + if self.part_end: + yield self.part_end + self.part_end = None + + if self.thinking: + yield ThinkingEndEvent(type=EventType.THINKING_END) + self.thinking = False + + tool_call_id = part.tool_call_id + if isinstance(part, BuiltinToolCallPart): + builtin_tool_call_id = '|'.join([BUILTIN_TOOL_CALL_ID_PREFIX, part.provider_name or '', tool_call_id]) + self.builtin_tool_call_ids[tool_call_id] = builtin_tool_call_id + tool_call_id = builtin_tool_call_id + + message_id = self.message_id or self.new_message_id() + yield ToolCallStartEvent(tool_call_id=tool_call_id, tool_call_name=part.tool_name, parent_message_id=message_id) + if part.args: + yield ToolCallArgsEvent(tool_call_id=tool_call_id, delta=part.args_as_json_str()) + self.part_end = ToolCallEndEvent(tool_call_id=tool_call_id) + + def handle_builtin_tool_call_start(self, part: BuiltinToolCallPart) -> AsyncIterator[BaseEvent]: + """Handle a BuiltinToolCallPart at start.""" + return self.handle_tool_call_start(part) + + async def handle_tool_call_delta(self, delta: ToolCallPartDelta) -> AsyncIterator[BaseEvent]: + """Handle a ToolCallPartDelta.""" + tool_call_id = delta.tool_call_id + assert tool_call_id, '`ToolCallPartDelta.tool_call_id` must be set' + if tool_call_id in self.builtin_tool_call_ids: + tool_call_id = self.builtin_tool_call_ids[tool_call_id] + yield ToolCallArgsEvent( + tool_call_id=tool_call_id, + delta=delta.args_delta if isinstance(delta.args_delta, str) else json.dumps(delta.args_delta), + ) + + async def handle_builtin_tool_return(self, part: BuiltinToolReturnPart) -> AsyncIterator[BaseEvent]: + """Handle a BuiltinToolReturnPart.""" + # Emit any pending part_end event (e.g., TOOL_CALL_END) before the result + if self.part_end: + yield self.part_end + self.part_end = None + + tool_call_id = self.builtin_tool_call_ids[part.tool_call_id] + yield ToolCallResultEvent( + message_id=self.new_message_id(), + type=EventType.TOOL_CALL_RESULT, + role='tool', + tool_call_id=tool_call_id, + content=part.model_response_str(), + ) + + async def handle_function_tool_call(self, event: FunctionToolCallEvent) -> AsyncIterator[BaseEvent]: + """Handle a FunctionToolCallEvent. 
+ + This event is emitted when a function tool is called, but no AG-UI events + are needed at this stage since tool calls are handled in PartStartEvent. + """ + return + yield # Make this an async generator + + async def handle_function_tool_result(self, event: FunctionToolResultEvent) -> AsyncIterator[BaseEvent]: + """Handle a FunctionToolResultEvent, emitting tool result events.""" + result = event.result + if not isinstance(result, ToolReturnPart): + return + + # Emit any pending part_end event (e.g., TOOL_CALL_END) before the result + if self.part_end: + yield self.part_end + self.part_end = None + + yield ToolCallResultEvent( + message_id=self.new_message_id(), + type=EventType.TOOL_CALL_RESULT, + role='tool', + tool_call_id=result.tool_call_id, + content=result.model_response_str(), + ) + + # Check for AG-UI events returned by tool calls. + possible_event = result.metadata or result.content + if isinstance(possible_event, BaseEvent): + yield possible_event + elif isinstance(possible_event, str | bytes): # pragma: no branch + # Avoid iterable check for strings and bytes. + pass + elif isinstance(possible_event, Iterable): # pragma: no branch + for item in possible_event: # type: ignore[reportUnknownMemberType] + if isinstance(item, BaseEvent): # pragma: no branch + yield item + + async def handle_final_result(self, event: FinalResultEvent) -> AsyncIterator[BaseEvent]: + """Handle a FinalResultEvent. + + This event is emitted when the agent produces a final result, but no AG-UI events + are needed at this stage. + """ + return + yield # Make this an async generator diff --git a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/adapter.py b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/adapter.py deleted file mode 100644 index e6ffa784f3..0000000000 --- a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/adapter.py +++ /dev/null @@ -1,334 +0,0 @@ -"""AG-UI adapter for handling requests.""" - -# pyright: reportGeneralTypeIssues=false, reportInvalidTypeArguments=false - -from __future__ import annotations - -import json -from dataclasses import dataclass -from http import HTTPStatus -from typing import TYPE_CHECKING, Any - -from pydantic import BaseModel, ValidationError - -from ...tools import AgentDepsT -from .event_stream import ( - AGUIEventStream, - RunAgentInput, - StateHandler, - _AGUIFrontendToolset, # type: ignore[reportPrivateUsage] - _InvalidStateError, # type: ignore[reportPrivateUsage] - _NoMessagesError, # type: ignore[reportPrivateUsage] - _RunError, # type: ignore[reportPrivateUsage] - protocol_messages_to_pai_messages, -) - -if TYPE_CHECKING: - from ...agent import Agent - -__all__ = ['AGUIAdapter'] - - -@dataclass -class AGUIAdapter: - """Adapter for handling AG-UI protocol requests with Pydantic AI agents. - - This adapter provides an interface for integrating Pydantic AI agents - with the AG-UI protocol, handling request parsing, message conversion, - and event streaming. 
- - Example: - ```python - from pydantic_ai import Agent - from pydantic_ai.ui.ag_ui import AGUIAdapter - - agent = Agent('openai:gpt-4') - adapter = AGUIAdapter(agent) - - async def handle_request(request: RunAgentInput, deps=None): - async for event_str in adapter.run_stream_sse(request, deps): - yield event_str - ``` - """ - - agent: Agent[AgentDepsT] - """The Pydantic AI agent to run.""" - - async def run_stream( # noqa: C901 - self, - request: RunAgentInput, - deps: AgentDepsT | None = None, - *, - output_type: Any = None, - model: Any = None, - model_settings: Any = None, - usage_limits: Any = None, - usage: Any = None, - infer_name: bool = True, - toolsets: Any = None, - on_complete: Any = None, - ): - """Stream events from an agent run as AG-UI protocol events. - - This method provides a complete implementation with all AG-UI features including: - - Frontend tools handling - - State injection - - Error handling (validation vs stream errors) - - on_complete callback - - RunStarted and RunFinished events - - Args: - request: The AG-UI request data. - deps: Optional dependencies to pass to the agent. - output_type: Custom output type for this run. - model: Optional model to use for this run. - model_settings: Optional settings for the model's request. - usage_limits: Optional limits on model request count or token usage. - usage: Optional usage to start with. - infer_name: Whether to infer the agent name from the call frame. - toolsets: Optional additional toolsets for this run. - on_complete: Optional callback called when the agent run completes. - - Yields: - AG-UI protocol events (BaseEvent subclasses). - - Raises: - _RunError: If request validation fails or other errors occur. - """ - from ... import _utils - from ...exceptions import UserError - from ...tools import DeferredToolRequests - from .event_stream import RunFinishedEvent, RunStartedEvent - - # Create event stream - event_stream = self.create_event_stream() - stream_started = False - - # Handle frontend tools - if request.tools: - toolset = _AGUIFrontendToolset[AgentDepsT](request.tools) - toolsets = [*toolsets, toolset] if toolsets else [toolset] - - try: - # Emit start event - yield RunStartedEvent( - thread_id=request.thread_id, - run_id=request.run_id, - ) - stream_started = True - - if not request.messages: - raise _NoMessagesError - - # Handle state injection - raw_state: dict[str, Any] = request.state or {} - if isinstance(deps, StateHandler): - if isinstance(deps.state, BaseModel): - try: - state = type(deps.state).model_validate(raw_state) - except ValidationError as e: # pragma: no cover - raise _InvalidStateError from e - else: - state = raw_state - - from dataclasses import replace - - deps = replace(deps, state=state) - elif raw_state: - raise UserError( - f'AG-UI state is provided but `deps` of type `{type(deps).__name__}` does not implement the `StateHandler` protocol: it needs to be a dataclass with a non-optional `state` field.' 
- ) - - # Convert AG-UI messages to pAI messages - messages = protocol_messages_to_pai_messages(request.messages) - - # Run agent and stream events - result = None - async for event in self.agent.run_stream_events( - user_prompt=None, - output_type=[output_type or self.agent.output_type, DeferredToolRequests], - message_history=messages, - model=model, - deps=deps, - model_settings=model_settings, - usage_limits=usage_limits, - usage=usage, - infer_name=infer_name, - toolsets=toolsets, - ): - from ...run import AgentRunResultEvent - - # Capture result for on_complete callback - if isinstance(event, AgentRunResultEvent): - result = event.result - - # Transform pAI events to AG-UI events - async for ag_ui_event in event_stream.agent_event_to_events(event): # type: ignore[arg-type] - yield ag_ui_event - - # Call on_complete callback - if on_complete is not None and result is not None: - if _utils.is_async_callable(on_complete): - await on_complete(result) - else: - await _utils.run_in_executor(on_complete, result) - - except _RunError as e: - if stream_started: - async for error_event in event_stream.on_stream_error(e): - yield error_event - else: - async for error_event in event_stream.on_validation_error(e): - yield error_event - raise - except Exception as e: - if stream_started: - async for error_event in event_stream.on_stream_error(e): - yield error_event - else: - async for error_event in event_stream.on_validation_error(e): - yield error_event - raise - else: - # Emit finish event - yield RunFinishedEvent( - thread_id=request.thread_id, - run_id=request.run_id, - ) - - async def run_stream_sse( - self, - request: RunAgentInput, - accept: str, - *, - output_type: Any = None, - model: Any = None, - deps: AgentDepsT | None = None, - model_settings: Any = None, - usage_limits: Any = None, - usage: Any = None, - infer_name: bool = True, - toolsets: Any = None, - on_complete: Any = None, - ): - """Stream SSE-encoded events from an agent run. - - This method wraps `run_stream` and encodes the events as SSE strings. - - Args: - request: The AG-UI request data. - accept: The accept header value for encoding. - output_type: Custom output type for this run. - model: Optional model to use for this run. - deps: Optional dependencies to pass to the agent. - model_settings: Optional settings for the model's request. - usage_limits: Optional limits on model request count or token usage. - usage: Optional usage to start with. - infer_name: Whether to infer the agent name from the call frame. - toolsets: Optional additional toolsets for this run. - on_complete: Optional callback called when the agent run completes. - - Yields: - SSE-formatted strings. 
- """ - from ag_ui.encoder import EventEncoder - - encoder = EventEncoder(accept=accept) - - try: - async for event in self.run_stream( - request=request, - deps=deps, - output_type=output_type, - model=model, - model_settings=model_settings, - usage_limits=usage_limits, - usage=usage, - infer_name=infer_name, - toolsets=toolsets, - on_complete=on_complete, - ): - yield encoder.encode(event) - except _RunError: - # Error events are already yielded by run_stream - # This shouldn't actually be reached since run_stream yields error events before raising - pass - except Exception: - # Let other exceptions propagate - raise - - async def dispatch_request( - self, - request: Any, - deps: AgentDepsT | None = None, - *, - output_type: Any = None, - model: Any = None, - model_settings: Any = None, - usage_limits: Any = None, - usage: Any = None, - infer_name: bool = True, - toolsets: Any = None, - on_complete: Any = None, - ) -> Any: - """Handle an AG-UI request and return a streaming response. - - Args: - request: The incoming Starlette/FastAPI request. - deps: Optional dependencies to pass to the agent. - output_type: Custom output type for this run. - model: Optional model to use for this run. - model_settings: Optional settings for the model's request. - usage_limits: Optional limits on model request count or token usage. - usage: Optional usage to start with. - infer_name: Whether to infer the agent name from the call frame. - toolsets: Optional additional toolsets for this run. - on_complete: Optional callback called when the agent run completes. - - Returns: - A streaming Starlette response with AG-UI protocol events. - """ - try: - from starlette.requests import Request - from starlette.responses import Response, StreamingResponse - except ImportError as e: # pragma: no cover - raise ImportError('Please install starlette to use dispatch_request') from e - - if not isinstance(request, Request): # pragma: no cover - raise TypeError(f'Expected Starlette Request, got {type(request).__name__}') - - accept = request.headers.get('accept', 'text/event-stream') - - try: - input_data = RunAgentInput.model_validate(await request.json()) - except ValidationError as e: # pragma: no cover - return Response( - content=json.dumps(e.json()), - media_type='application/json', - status_code=HTTPStatus.UNPROCESSABLE_ENTITY, - ) - - return StreamingResponse( - self.run_stream_sse( - request=input_data, - accept=accept, - deps=deps, - output_type=output_type, - model=model, - model_settings=model_settings, - usage_limits=usage_limits, - usage=usage, - infer_name=infer_name, - toolsets=toolsets, - on_complete=on_complete, - ), - media_type=accept, - ) - - def create_event_stream(self) -> AGUIEventStream[AgentDepsT]: - """Create a new AG-UI event stream. - - Returns: - An AGUIEventStream instance. - """ - return AGUIEventStream[AgentDepsT]() diff --git a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/event_stream.py b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/event_stream.py deleted file mode 100644 index 5ce69abfda..0000000000 --- a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/event_stream.py +++ /dev/null @@ -1,548 +0,0 @@ -"""AG-UI protocol adapter for Pydantic AI agents. - -This module provides classes for integrating Pydantic AI agents with the AG-UI protocol, -enabling streaming event-based communication for interactive AI applications. 
-""" - -# pyright: reportIncompatibleMethodOverride=false, reportUnusedClass=false, reportGeneralTypeIssues=false, reportInvalidTypeArguments=false - -from __future__ import annotations - -import json -from collections.abc import AsyncIterator, Iterable -from dataclasses import Field, dataclass -from typing import TYPE_CHECKING, Any, ClassVar, Final, Generic, Protocol, TypeVar, runtime_checkable - -if TYPE_CHECKING: - pass # Agent type is not actually used in this module - -from pydantic import BaseModel, ValidationError - -from ...messages import ( - BuiltinToolCallEvent, # type: ignore[reportDeprecated] - BuiltinToolCallPart, - BuiltinToolResultEvent, # type: ignore[reportDeprecated] - BuiltinToolReturnPart, - FinalResultEvent, - FunctionToolCallEvent, - FunctionToolResultEvent, - ModelMessage, - TextPart, - TextPartDelta, - ThinkingPart, - ThinkingPartDelta, - ToolCallPart, - ToolCallPartDelta, - ToolReturnPart, -) -from ...run import AgentRunResultEvent -from ...tools import AgentDepsT, ToolDefinition -from ...toolsets.external import ExternalToolset -from .. import BaseEventStream - -try: - from ag_ui.core import ( - AssistantMessage, - BaseEvent, - DeveloperMessage, - EventType, - Message, - RunAgentInput, - RunErrorEvent, - RunFinishedEvent, - RunStartedEvent, - SystemMessage, - TextMessageContentEvent, - TextMessageEndEvent, - TextMessageStartEvent, - ThinkingEndEvent, - ThinkingStartEvent, - ThinkingTextMessageContentEvent, - ThinkingTextMessageEndEvent, - ThinkingTextMessageStartEvent, - Tool as AGUITool, - ToolCallArgsEvent, - ToolCallEndEvent, - ToolCallResultEvent, - ToolCallStartEvent, - ToolMessage, - UserMessage, - ) -except ImportError as e: # pragma: no cover - raise ImportError( - 'Please install the `ag-ui-protocol` package to use AG-UI integration, ' - 'you can use the `ag-ui` optional group — `pip install "pydantic-ai-slim[ag-ui]"`' - ) from e - -__all__ = [ - 'AGUIEventStream', - 'StateHandler', - 'StateDeps', - 'protocol_messages_to_pai_messages', - '_AGUIFrontendToolset', - '_NoMessagesError', - '_InvalidStateError', - '_RunError', - 'RunAgentInput', - 'RunStartedEvent', - 'RunFinishedEvent', -] - -_BUILTIN_TOOL_CALL_ID_PREFIX: Final[str] = 'pyd_ai_builtin' - - -# State management types - -StateT = TypeVar('StateT', bound=BaseModel) -"""Type variable for the state type, which must be a subclass of `BaseModel`.""" - - -@runtime_checkable -class StateHandler(Protocol): - """Protocol for state handlers in agent runs. Requires the class to be a dataclass with a `state` field.""" - - # Has to be a dataclass so we can use `replace` to update the state. - # From https://github.com/python/typeshed/blob/9ab7fde0a0cd24ed7a72837fcb21093b811b80d8/stdlib/_typeshed/__init__.pyi#L352 - __dataclass_fields__: ClassVar[dict[str, Field[Any]]] - - @property - def state(self) -> Any: - """Get the current state of the agent run.""" - ... - - @state.setter - def state(self, state: Any) -> None: - """Set the state of the agent run. - - This method is called to update the state of the agent run with the - provided state. - - Args: - state: The run state. - - Raises: - InvalidStateError: If `state` does not match the expected model. - """ - ... - - -@dataclass -class StateDeps(Generic[StateT]): - """Provides AG-UI state management. - - This class is used to manage the state of an agent run. It allows setting - the state of the agent run with a specific type of state model, which must - be a subclass of `BaseModel`. 
- - The state is set using the `state` setter by the `Adapter` when the run starts. - - Implements the `StateHandler` protocol. - """ - - state: StateT - - -# Error types - - -@dataclass -class _RunError(Exception): - """Exception raised for errors during agent runs.""" - - message: str - code: str - - def __str__(self) -> str: # pragma: no cover - return self.message - - -@dataclass -class _NoMessagesError(_RunError): - """Exception raised when no messages are found in the input.""" - - message: str = 'no messages found in the input' - code: str = 'no_messages' - - -@dataclass -class _InvalidStateError(_RunError, ValidationError): - """Exception raised when an invalid state is provided.""" - - message: str = 'invalid state provided' - code: str = 'invalid_state' - - -class _ToolCallNotFoundError(_RunError, ValueError): - """Exception raised when an tool result is present without a matching call.""" - - def __init__(self, tool_call_id: str) -> None: - """Initialize the exception with the tool call ID.""" - super().__init__( # pragma: no cover - message=f'Tool call with ID {tool_call_id} not found in the history.', - code='tool_call_not_found', - ) - - -# Frontend toolset - - -class _AGUIFrontendToolset(ExternalToolset[AgentDepsT]): - """Toolset for AG-UI frontend tools.""" - - def __init__(self, tools: list[AGUITool]): - """Initialize the toolset with AG-UI tools. - - Args: - tools: List of AG-UI tool definitions. - """ - super().__init__( - [ - ToolDefinition( - name=tool.name, - description=tool.description, - parameters_json_schema=tool.parameters, - ) - for tool in tools - ] - ) - - @property - def label(self) -> str: - """Return the label for this toolset.""" - return 'the AG-UI frontend tools' # pragma: no cover - - -class AGUIEventStream(BaseEventStream[BaseEvent, AgentDepsT]): - """Transforms Pydantic AI agent events into AG-UI protocol events. - - This class handles the stateful transformation of streaming agent events - into the AG-UI protocol format, managing message IDs, thinking mode state, - and tool call ID mappings for builtin tools. 
- - Example: - ```python - event_stream = AGUIEventStream() - async for ag_ui_event in event_stream.agent_event_to_events(pai_event): - print(ag_ui_event) - ``` - """ - - def __init__(self) -> None: - """Initialize AG-UI event stream state.""" - super().__init__() - self.part_end: BaseEvent | None = None - self.thinking: bool = False - self.builtin_tool_call_ids: dict[str, str] = {} - - # Granular handlers implementation - - async def handle_text_start(self, part: TextPart) -> AsyncIterator[BaseEvent]: - """Handle a TextPart at start.""" - if self.part_end: - yield self.part_end - self.part_end = None - - if self.thinking: - yield ThinkingEndEvent(type=EventType.THINKING_END) - self.thinking = False - - message_id = self.new_message_id() - yield TextMessageStartEvent(message_id=message_id) - if part.content: # pragma: no branch - yield TextMessageContentEvent(message_id=message_id, delta=part.content) - self.part_end = TextMessageEndEvent(message_id=message_id) - - async def handle_text_delta(self, delta: TextPartDelta) -> AsyncIterator[BaseEvent]: - """Handle a TextPartDelta.""" - if delta.content_delta: # pragma: no branch - yield TextMessageContentEvent(message_id=self.message_id, delta=delta.content_delta) - - async def handle_thinking_start(self, part: ThinkingPart) -> AsyncIterator[BaseEvent]: - """Handle a ThinkingPart at start.""" - if self.part_end: - yield self.part_end - self.part_end = None - - if not self.thinking: - yield ThinkingStartEvent(type=EventType.THINKING_START) - self.thinking = True - - if part.content: - yield ThinkingTextMessageStartEvent(type=EventType.THINKING_TEXT_MESSAGE_START) - yield ThinkingTextMessageContentEvent(type=EventType.THINKING_TEXT_MESSAGE_CONTENT, delta=part.content) - self.part_end = ThinkingTextMessageEndEvent(type=EventType.THINKING_TEXT_MESSAGE_END) - - async def handle_thinking_delta(self, delta: ThinkingPartDelta) -> AsyncIterator[BaseEvent]: - """Handle a ThinkingPartDelta.""" - if delta.content_delta: # pragma: no branch - if not isinstance(self.part_end, ThinkingTextMessageEndEvent): - yield ThinkingTextMessageStartEvent(type=EventType.THINKING_TEXT_MESSAGE_START) - self.part_end = ThinkingTextMessageEndEvent(type=EventType.THINKING_TEXT_MESSAGE_END) - - yield ThinkingTextMessageContentEvent( - type=EventType.THINKING_TEXT_MESSAGE_CONTENT, delta=delta.content_delta - ) - - async def handle_tool_call_start(self, part: ToolCallPart | BuiltinToolCallPart) -> AsyncIterator[BaseEvent]: - """Handle a ToolCallPart or BuiltinToolCallPart at start.""" - if self.part_end: - yield self.part_end - self.part_end = None - - if self.thinking: - yield ThinkingEndEvent(type=EventType.THINKING_END) - self.thinking = False - - tool_call_id = part.tool_call_id - if isinstance(part, BuiltinToolCallPart): - builtin_tool_call_id = '|'.join([_BUILTIN_TOOL_CALL_ID_PREFIX, part.provider_name or '', tool_call_id]) - self.builtin_tool_call_ids[tool_call_id] = builtin_tool_call_id - tool_call_id = builtin_tool_call_id - - message_id = self.message_id or self.new_message_id() - yield ToolCallStartEvent(tool_call_id=tool_call_id, tool_call_name=part.tool_name, parent_message_id=message_id) - if part.args: - yield ToolCallArgsEvent(tool_call_id=tool_call_id, delta=part.args_as_json_str()) - self.part_end = ToolCallEndEvent(tool_call_id=tool_call_id) - - async def handle_tool_call_delta(self, delta: ToolCallPartDelta) -> AsyncIterator[BaseEvent]: - """Handle a ToolCallPartDelta.""" - tool_call_id = delta.tool_call_id - assert tool_call_id, 
'`ToolCallPartDelta.tool_call_id` must be set' - if tool_call_id in self.builtin_tool_call_ids: - tool_call_id = self.builtin_tool_call_ids[tool_call_id] - yield ToolCallArgsEvent( - tool_call_id=tool_call_id, - delta=delta.args_delta if isinstance(delta.args_delta, str) else json.dumps(delta.args_delta), - ) - - async def handle_builtin_tool_return(self, part: BuiltinToolReturnPart) -> AsyncIterator[BaseEvent]: - """Handle a BuiltinToolReturnPart.""" - # Emit any pending part_end event (e.g., TOOL_CALL_END) before the result - if self.part_end: - yield self.part_end - self.part_end = None - - tool_call_id = self.builtin_tool_call_ids[part.tool_call_id] - yield ToolCallResultEvent( - message_id=self.new_message_id(), - type=EventType.TOOL_CALL_RESULT, - role='tool', - tool_call_id=tool_call_id, - content=part.model_response_str(), - ) - - async def handle_function_tool_call(self, event: FunctionToolCallEvent) -> AsyncIterator[BaseEvent]: - """Handle a FunctionToolCallEvent. - - This event is emitted when a function tool is called, but no AG-UI events - are needed at this stage since tool calls are handled in PartStartEvent. - """ - return - yield # Make this an async generator - - async def handle_function_tool_result(self, event: FunctionToolResultEvent) -> AsyncIterator[BaseEvent]: - """Handle a FunctionToolResultEvent, emitting tool result events.""" - result = event.result - if not isinstance(result, ToolReturnPart): - return - - # Emit any pending part_end event (e.g., TOOL_CALL_END) before the result - if self.part_end: - yield self.part_end - self.part_end = None - - yield ToolCallResultEvent( - message_id=self.new_message_id(), - type=EventType.TOOL_CALL_RESULT, - role='tool', - tool_call_id=result.tool_call_id, - content=result.model_response_str(), - ) - - # Check for AG-UI events returned by tool calls. - possible_event = result.metadata or result.content - if isinstance(possible_event, BaseEvent): - yield possible_event - elif isinstance(possible_event, str | bytes): # pragma: no branch - # Avoid iterable check for strings and bytes. - pass - elif isinstance(possible_event, Iterable): # pragma: no branch - for item in possible_event: # type: ignore[reportUnknownMemberType] - if isinstance(item, BaseEvent): # pragma: no branch - yield item - - async def handle_builtin_tool_call(self, event: BuiltinToolCallEvent) -> AsyncIterator[BaseEvent]: # type: ignore[reportDeprecated] - """Handle a BuiltinToolCallEvent. - - This event is emitted when a builtin tool is called, but no AG-UI events - are needed at this stage since builtin tool calls are handled in PartStartEvent. - """ - return - yield # Make this an async generator - - async def handle_builtin_tool_result(self, event: BuiltinToolResultEvent) -> AsyncIterator[BaseEvent]: # type: ignore[reportDeprecated] - """Handle a BuiltinToolResultEvent. - - This event is emitted when a builtin tool returns. We need to emit any pending - part_end event (TOOL_CALL_END) before the result is shown in handle_builtin_tool_return. - """ - # Emit any pending part_end event (e.g., TOOL_CALL_END) before the result - if self.part_end: - yield self.part_end - self.part_end = None - - async def handle_final_result(self, event: FinalResultEvent) -> AsyncIterator[BaseEvent]: - """Handle a FinalResultEvent. - - This event is emitted when the agent produces a final result, but no AG-UI events - are needed at this stage. 
- """ - return - yield # Make this an async generator - - async def handle_run_result(self, event: AgentRunResultEvent) -> AsyncIterator[BaseEvent]: - """Handle an AgentRunResultEvent, cleaning up any pending state.""" - # Emit any pending part end event - if self.part_end: # pragma: no branch - yield self.part_end - self.part_end = None - - # End thinking mode if still active - if self.thinking: - yield ThinkingEndEvent( - type=EventType.THINKING_END, - ) - self.thinking = False - - async def on_validation_error(self, error: Exception) -> AsyncIterator[BaseEvent]: - """Handle validation errors before stream starts.""" - # Try to get code from exception if it has one, otherwise use class name - code = getattr(error, 'code', error.__class__.__name__) - yield RunErrorEvent(message=str(error), code=code) - - async def on_stream_error(self, error: Exception) -> AsyncIterator[BaseEvent]: - """Handle errors during streaming.""" - # Try to get code from exception if it has one, otherwise use class name - code = getattr(error, 'code', error.__class__.__name__) - yield RunErrorEvent(message=str(error), code=code) - - def encode_event(self, event: BaseEvent, accept: str) -> str: - """Encode an AG-UI event as SSE. - - Args: - event: The AG-UI event to encode. - accept: The accept header value for encoding format. - - Returns: - The SSE-formatted string. - """ - from ag_ui.encoder import EventEncoder - - encoder = EventEncoder(accept=accept) - return encoder.encode(event) - - -def protocol_messages_to_pai_messages(messages: list[Message]) -> list[ModelMessage]: - """Convert AG-UI messages to Pydantic AI messages. - - Args: - messages: List of AG-UI messages. - - Returns: - List of Pydantic AI ModelMessage objects. - """ - from ...messages import ( - ModelRequest, - ModelRequestPart, - ModelResponse, - ModelResponsePart, - SystemPromptPart, - UserPromptPart, - ) - - result: list[ModelMessage] = [] - tool_calls: dict[str, str] = {} # Tool call ID to tool name mapping. 
- request_parts: list[ModelRequestPart] | None = None - response_parts: list[ModelResponsePart] | None = None - - for msg in messages: - if isinstance(msg, UserMessage | SystemMessage | DeveloperMessage) or ( - isinstance(msg, ToolMessage) and not msg.tool_call_id.startswith(_BUILTIN_TOOL_CALL_ID_PREFIX) - ): - if request_parts is None: - request_parts = [] - result.append(ModelRequest(parts=request_parts)) - response_parts = None - - if isinstance(msg, UserMessage): - request_parts.append(UserPromptPart(content=msg.content)) - elif isinstance(msg, SystemMessage | DeveloperMessage): - request_parts.append(SystemPromptPart(content=msg.content)) - else: - tool_call_id = msg.tool_call_id - tool_name = tool_calls.get(tool_call_id) - if tool_name is None: # pragma: no cover - raise ValueError(f'Tool call with ID {tool_call_id} not found in the history.') - - request_parts.append( - ToolReturnPart( - tool_name=tool_name, - content=msg.content, - tool_call_id=tool_call_id, - ) - ) - - elif isinstance(msg, AssistantMessage) or ( # pragma: no branch - isinstance(msg, ToolMessage) and msg.tool_call_id.startswith(_BUILTIN_TOOL_CALL_ID_PREFIX) - ): - if response_parts is None: - response_parts = [] - result.append(ModelResponse(parts=response_parts)) - request_parts = None - - if isinstance(msg, AssistantMessage): - if msg.content: - response_parts.append(TextPart(content=msg.content)) - - if msg.tool_calls: - for tool_call in msg.tool_calls: - tool_call_id = tool_call.id - tool_name = tool_call.function.name - tool_calls[tool_call_id] = tool_name - - if tool_call_id.startswith(_BUILTIN_TOOL_CALL_ID_PREFIX): - _, provider_name, tool_call_id = tool_call_id.split('|', 2) - response_parts.append( - BuiltinToolCallPart( - tool_name=tool_name, - args=tool_call.function.arguments, - tool_call_id=tool_call_id, - provider_name=provider_name, - ) - ) - else: - response_parts.append( - ToolCallPart( - tool_name=tool_name, - tool_call_id=tool_call_id, - args=tool_call.function.arguments, - ) - ) - else: - tool_call_id = msg.tool_call_id - tool_name = tool_calls.get(tool_call_id) - if tool_name is None: # pragma: no cover - raise ValueError(f'Tool call with ID {tool_call_id} not found in the history.') - _, provider_name, tool_call_id = tool_call_id.split('|', 2) - - response_parts.append( - BuiltinToolReturnPart( - tool_name=tool_name, - content=msg.content, - tool_call_id=tool_call_id, - provider_name=provider_name, - ) - ) - - return result diff --git a/pydantic_ai_slim/pydantic_ai/ui/event_stream.py b/pydantic_ai_slim/pydantic_ai/ui/event_stream.py new file mode 100644 index 0000000000..927e910409 --- /dev/null +++ b/pydantic_ai_slim/pydantic_ai/ui/event_stream.py @@ -0,0 +1,377 @@ +"""Base classes for UI event stream protocols. + +This module provides abstract base classes for implementing UI event stream adapters +that transform Pydantic AI agent events into protocol-specific events (e.g., AG-UI, Vercel AI). 
+""" + +from __future__ import annotations + +from abc import ABC +from collections.abc import AsyncIterator +from dataclasses import dataclass, field +from typing import Generic, TypeVar +from uuid import uuid4 + +from ..messages import ( + AgentStreamEvent, + BuiltinToolCallPart, + BuiltinToolReturnPart, + FilePart, + FinalResultEvent, + FunctionToolCallEvent, + FunctionToolResultEvent, + PartDeltaEvent, + PartStartEvent, + TextPart, + TextPartDelta, + ThinkingPart, + ThinkingPartDelta, + ToolCallPart, + ToolCallPartDelta, +) +from ..run import AgentRunResult, AgentRunResultEvent +from ..tools import AgentDepsT + +__all__ = [ + 'BaseEventStream', +] + +EventT = TypeVar('EventT') +"""Type variable for protocol-specific event types.""" + +RequestT = TypeVar('RequestT') +"""Type variable for request types.""" + +SourceEvent = AgentStreamEvent | AgentRunResultEvent + + +@dataclass +class BaseEventStream(ABC, Generic[RequestT, EventT, AgentDepsT]): + """Base class for transforming pAI agent events into protocol-specific events. + + This class provides a granular method-per-part-type pattern that makes it easy to customize + specific event transformations by overriding individual handler methods. + + Subclasses should: + 1. Initialize state attributes in `__init__` if needed + 2. Implement all abstract `handle_*` methods for event transformation + 3. Implement error handling methods + 4. Optionally override lifecycle hooks (`before_stream`, `after_stream`) + + Example: + ```python + class MyEventStream(BaseEventStream[MyEvent, MyDeps]): + def __init__(self): + super().__init__() + self.custom_state = {} + + async def handle_text_start(self, part: TextPart): + yield MyTextStartEvent(id=self.new_message_id(), text=part.content) + ``` + """ + + request: RequestT + + result: AgentRunResult | None = None + + message_id: str = field(default_factory=lambda: str(uuid4())) + + def new_message_id(self) -> str: + """Generate and store a new message ID. + + Returns: + A new UUID-based message ID. + """ + self.message_id = str(uuid4()) + return self.message_id + + async def handle_stream(self, stream: AsyncIterator[SourceEvent]) -> AsyncIterator[EventT]: + """Handle a stream of agent events. + + Args: + stream: The stream of agent events to handle. + + Yields: + Protocol-specific events. + """ + async for e in self.before_stream(): + yield e + + try: + async for event in stream: + async for e in self.handle_event(event): + yield e + except Exception as e: + async for e in self.on_error(e): + yield e + else: + async for e in self.after_stream(): + yield e + + async def handle_event(self, event: SourceEvent) -> AsyncIterator[EventT]: # noqa: C901 + """Transform a pAI agent event into protocol-specific events. + + This method dispatches to specific `handle_*` methods based on event and part type. + Subclasses should implement the individual handler methods rather than overriding this. + + Args: + event: The pAI agent event to transform. + + Yields: + Protocol-specific events. 
+ """ + async for e in self.before_event(event): + yield e + + match event: + case PartStartEvent(part=part): + # Dispatch based on part type + match part: + case TextPart(): + async for e in self.handle_text_start(part): + yield e + case ThinkingPart(): + async for e in self.handle_thinking_start(part): + yield e + case ToolCallPart(): + async for e in self.handle_tool_call_start(part): + yield e + case BuiltinToolCallPart(): + async for e in self.handle_builtin_tool_call_start(part): + yield e + case BuiltinToolReturnPart(): + async for e in self.handle_builtin_tool_return(part): + yield e + case FilePart(): + # FilePart is not currently handled by UI protocols + pass + case PartDeltaEvent(delta=delta): + # Dispatch based on delta type + match delta: + case TextPartDelta(): + async for e in self.handle_text_delta(delta): + yield e + case ThinkingPartDelta(): + async for e in self.handle_thinking_delta(delta): + yield e + case ToolCallPartDelta(): + async for e in self.handle_tool_call_delta(delta): + yield e + case FunctionToolCallEvent(): + async for e in self.handle_function_tool_call(event): + yield e + case FunctionToolResultEvent(): + async for e in self.handle_function_tool_result(event): + yield e + case FinalResultEvent(): + async for e in self.handle_final_result(event): + yield e + case AgentRunResultEvent(): + self.result = event.result + async for e in self.handle_run_result(event): + yield e + case _: + pass + + async for e in self.after_event(event): + yield e + + async def handle_text_start(self, part: TextPart) -> AsyncIterator[EventT]: + """Handle a TextPart at start. + + Args: + part: The TextPart. + + Yields: + Protocol-specific events. + """ + return + yield # Make this an async generator + + async def handle_text_delta(self, delta: TextPartDelta) -> AsyncIterator[EventT]: + """Handle a TextPartDelta. + + Args: + delta: The TextPartDelta. + + Yields: + Protocol-specific events. + """ + return + yield # Make this an async generator + + async def handle_thinking_start(self, part: ThinkingPart) -> AsyncIterator[EventT]: + """Handle a ThinkingPart at start. + + Args: + part: The ThinkingPart. + + Yields: + Protocol-specific events. + """ + return + yield # Make this an async generator + + async def handle_thinking_delta(self, delta: ThinkingPartDelta) -> AsyncIterator[EventT]: + """Handle a ThinkingPartDelta. + + Args: + delta: The ThinkingPartDelta. + + Yields: + Protocol-specific events. + """ + return + yield # Make this an async generator + + async def handle_tool_call_start(self, part: ToolCallPart) -> AsyncIterator[EventT]: + """Handle a ToolCallPart at start. + + Args: + part: The tool call part. + + Yields: + Protocol-specific events. + """ + return + yield # Make this an async generator + + async def handle_builtin_tool_call_start(self, part: BuiltinToolCallPart) -> AsyncIterator[EventT]: + """Handle a BuiltinToolCallPart at start. + + Args: + part: The tool call part. + + Yields: + Protocol-specific events. + """ + return + yield # Make this an async generator + + async def handle_tool_call_delta(self, delta: ToolCallPartDelta) -> AsyncIterator[EventT]: + """Handle a ToolCallPartDelta. + + Args: + delta: The ToolCallPartDelta. + + Yields: + Protocol-specific events. + """ + return + yield # Make this an async generator + + async def handle_builtin_tool_return(self, part: BuiltinToolReturnPart) -> AsyncIterator[EventT]: + """Handle a BuiltinToolReturnPart. + + Args: + part: The BuiltinToolReturnPart. + + Yields: + Protocol-specific events. 
+ """ + return + yield # Make this an async generator + + async def handle_function_tool_call(self, event: FunctionToolCallEvent) -> AsyncIterator[EventT]: + """Handle a FunctionToolCallEvent. + + Args: + event: The function tool call event. + + Yields: + Protocol-specific events. + """ + return + yield # Make this an async generator + + async def handle_function_tool_result(self, event: FunctionToolResultEvent) -> AsyncIterator[EventT]: + """Handle a FunctionToolResultEvent. + + Args: + event: The function tool result event. + + Yields: + Protocol-specific events. + """ + return + yield # Make this an async generator + + async def handle_final_result(self, event: FinalResultEvent) -> AsyncIterator[EventT]: + """Handle a FinalResultEvent. + + Args: + event: The final result event. + + Yields: + Protocol-specific events. + """ + return + yield # Make this an async generator + + async def handle_run_result(self, event: AgentRunResultEvent) -> AsyncIterator[EventT]: + """Handle an AgentRunResultEvent (final event with result/usage). + + Args: + event: The agent run result event. + + Yields: + Protocol-specific events. + """ + return + yield # Make this an async generator + + async def before_event(self, event: SourceEvent) -> AsyncIterator[EventT]: + """Handle an event before it is processed. + + Args: + event: The event to handle. + """ + return + yield # Make this an async generator + + async def after_event(self, event: SourceEvent) -> AsyncIterator[EventT]: + """Handle an event after it is processed. + + Args: + event: The event to handle. + """ + return + yield # Make this an async generator + + # Lifecycle hooks (optional overrides) + + async def before_stream(self) -> AsyncIterator[EventT]: + """Yield events before agent streaming starts. + + This hook is called before any agent events are processed. + Override this to inject custom events at the start of the stream. + + Yields: + Protocol-specific events to emit before streaming. + """ + return + yield # Make this an async generator + + async def after_stream(self) -> AsyncIterator[EventT]: + """Yield events after agent streaming completes. + + This hook is called after all agent events have been processed. + Override this to inject custom events at the end of the stream. + + Yields: + Protocol-specific events to emit after streaming. + """ + return + yield # Make this an async generator + + async def on_error(self, error: Exception) -> AsyncIterator[EventT]: + """Handle errors that occur during streaming (after stream has started). + + Args: + error: The error that occurred during streaming. + + Yields: + Protocol-specific error events. 
+ """ + return + yield # Make this an async generator diff --git a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/__init__.py b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/__init__.py index f004e15579..e0788a1404 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/__init__.py +++ b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/__init__.py @@ -7,10 +7,9 @@ https://github.com/vercel/ai/blob/ai%405.0.34/packages/ai/src/ui/ui-messages.ts """ -from ._utils import VERCEL_AI_DSP_HEADERS, CamelBaseModel, JSONValue, ProviderMetadata -from .adapter import VercelAIAdapter -from .event_stream import VercelAIEventStream, protocol_messages_to_pai_messages -from .request_types import ( +from ._adapter import VercelAIAdapter +from ._event_stream import VercelAIEventStream +from ._request_types import ( RequestData, SubmitMessage, TextUIPart, @@ -19,11 +18,10 @@ UIPart, request_data_ta, ) -from .response_types import ( +from ._response_types import ( AbortChunk, - AbstractSSEChunk, + BaseChunk, DataUIMessageChunk, - DoneChunk, ErrorChunk, FileChunk, FinishChunk, @@ -46,6 +44,7 @@ ToolOutputAvailableChunk, ToolOutputErrorChunk, ) +from ._utils import VERCEL_AI_DSP_HEADERS, CamelBaseModel, JSONValue, ProviderMetadata __all__ = [ # Utilities @@ -62,7 +61,7 @@ 'SubmitMessage', 'request_data_ta', # Response types - 'AbstractSSEChunk', + 'BaseChunk', 'TextStartChunk', 'TextDeltaChunk', 'TextEndChunk', @@ -86,9 +85,7 @@ 'FinishChunk', 'AbortChunk', 'MessageMetadataChunk', - 'DoneChunk', # Event stream and adapter 'VercelAIEventStream', 'VercelAIAdapter', - 'protocol_messages_to_pai_messages', ] diff --git a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_adapter.py b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_adapter.py new file mode 100644 index 0000000000..8f3053ff89 --- /dev/null +++ b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_adapter.py @@ -0,0 +1,183 @@ +"""Vercel AI adapter for handling requests.""" + +from __future__ import annotations + +from collections.abc import Sequence +from dataclasses import dataclass +from functools import cached_property +from typing import ( + Any, +) + +from ... import DeferredToolResults +from ...agent import AbstractAgent, AgentDepsT +from ...messages import ( + ModelMessage, + ModelRequest, + ModelResponse, + SystemPromptPart, + TextPart, + UserPromptPart, +) +from ...models import KnownModelName, Model +from ...output import OutputSpec +from ...settings import ModelSettings +from ...toolsets import AbstractToolset +from ...usage import RunUsage, UsageLimits +from ..adapter import BaseAdapter, OnCompleteFunc +from ..event_stream import BaseEventStream +from ._event_stream import VercelAIEventStream +from ._request_types import RequestData, TextUIPart, UIMessage, request_data_ta +from ._response_types import BaseChunk +from ._utils import VERCEL_AI_DSP_HEADERS + +try: + from starlette.requests import Request + from starlette.responses import Response +except ImportError as e: # pragma: no cover + raise ImportError( + 'Please install the `starlette` package to use `Agent.to_ag_ui()` method, ' + 'you can use the `ag-ui` optional group — `pip install "pydantic-ai-slim[ag-ui]"`' + ) from e + + +__all__ = ['VercelAIAdapter'] + + +@dataclass +class VercelAIAdapter(BaseAdapter[RequestData, UIMessage, BaseChunk, AgentDepsT]): + """Adapter for handling Vercel AI protocol requests with Pydantic AI agents. + + This adapter provides a simplified interface for integrating Pydantic AI agents + with the Vercel AI protocol, handling request parsing, message conversion, + and event streaming. 
+
+    Example:
+        ```python
+        from pydantic_ai import Agent
+        from pydantic_ai.ui.vercel_ai import RequestData, VercelAIAdapter
+
+        agent = Agent('openai:gpt-4')
+
+        async def handle_request(data: RequestData):
+            adapter = VercelAIAdapter(agent, request=data)
+            async for chunk in adapter.encode_stream(adapter.run_stream()):
+                yield chunk
+        ```
+    """
+
+    def create_event_stream(self) -> BaseEventStream[RequestData, BaseChunk, AgentDepsT]:
+        return VercelAIEventStream(self.request)
+
+    def encode_event(self, event: BaseChunk, accept: str | None = None) -> str:
+        return f'data: {event.model_dump_json(by_alias=True, exclude_none=True)}\n\n'
+
+    @classmethod
+    async def validate_request(cls, request: Request) -> RequestData:
+        """Validate a Vercel AI request."""
+        return request_data_ta.validate_json(await request.body())
+
+    @classmethod
+    async def dispatch_request(
+        cls,
+        agent: AbstractAgent[AgentDepsT, Any],
+        request: Request,
+        *,
+        message_history: Sequence[ModelMessage] | None = None,
+        deferred_tool_results: DeferredToolResults | None = None,
+        model: Model | KnownModelName | str | None = None,
+        deps: AgentDepsT = None,
+        output_type: OutputSpec[Any] | None = None,
+        model_settings: ModelSettings | None = None,
+        usage_limits: UsageLimits | None = None,
+        usage: RunUsage | None = None,
+        infer_name: bool = True,
+        toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None,
+        on_complete: OnCompleteFunc | None = None,
+    ) -> Response:
+        """Handle a Vercel AI request and return a streaming response.
+
+        Args:
+            agent: The agent to run.
+            request: The incoming Starlette/FastAPI request.
+            message_history: Optional history of the conversation so far.
+            deferred_tool_results: Optional results for deferred tool calls in the message history.
+            deps: Optional dependencies to pass to the agent.
+            output_type: Custom output type for this run.
+            model: Optional model to use for this run.
+            model_settings: Optional settings for the model's request.
+            usage_limits: Optional limits on model request count or token usage.
+            usage: Optional usage to start with.
+            infer_name: Whether to infer the agent name from the call frame.
+            toolsets: Optional additional toolsets for this run.
+            on_complete: Optional callback called when the agent run completes.
+
+        Returns:
+            A streaming Starlette response with Vercel AI protocol events.
+        """
+        response = await super().dispatch_request(
+            agent,
+            request,
+            message_history=message_history,
+            deferred_tool_results=deferred_tool_results,
+            model=model,
+            deps=deps,
+            output_type=output_type,
+            model_settings=model_settings,
+            usage_limits=usage_limits,
+            usage=usage,
+            infer_name=infer_name,
+            toolsets=toolsets,
+            on_complete=on_complete,
+        )
+        response.headers.update(VERCEL_AI_DSP_HEADERS)
+        return response
+
+    @cached_property
+    def messages(self) -> list[ModelMessage]:
+        """Convert the request's Vercel AI protocol messages to Pydantic AI messages.
+
+        Returns:
+            List of Pydantic AI ModelMessage objects.
+
+        Raises:
+            ValueError: If message format is not supported.
+ """ + pai_messages: list[ModelMessage] = [] + + for msg in self.request.messages: + if msg.role == 'user': + # User message - extract text from parts + texts: list[str] = [] + for part in msg.parts: + if isinstance(part, TextUIPart): + texts.append(part.text) + else: + raise ValueError(f'Only text parts are supported for user messages, got {type(part).__name__}') + + if texts: + pai_messages.append(ModelRequest(parts=[UserPromptPart(content='\n'.join(texts))])) + + elif msg.role == 'assistant': + # Assistant message - for now, just extract text + # Full reconstruction of ModelResponse with tool calls would require more complex logic + texts: list[str] = [] + for part in msg.parts: + if isinstance(part, TextUIPart): + texts.append(part.text) + # TODO: Handle ToolOutputAvailablePart for full message history reconstruction + + if texts: + pai_messages.append(ModelResponse(parts=[TextPart(content='\n'.join(texts))])) + + elif msg.role == 'system': + # System message - not in standard Vercel AI protocol but might be custom + texts: list[str] = [] + for part in msg.parts: + if isinstance(part, TextUIPart): + texts.append(part.text) + + if texts: + pai_messages.append(ModelRequest(parts=[SystemPromptPart(content='\n'.join(texts))])) + + return pai_messages diff --git a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/event_stream.py b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_event_stream.py similarity index 61% rename from pydantic_ai_slim/pydantic_ai/ui/vercel_ai/event_stream.py rename to pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_event_stream.py index 465a3bb7e8..26cab17894 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/event_stream.py +++ b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_event_stream.py @@ -17,7 +17,6 @@ FinalResultEvent, FunctionToolCallEvent, FunctionToolResultEvent, - ModelMessage, RetryPromptPart, TextPart, TextPartDelta, @@ -27,13 +26,11 @@ ToolCallPartDelta, ToolReturnPart, ) -from ...run import AgentRunResultEvent from ...tools import AgentDepsT from .. import BaseEventStream -from .request_types import TextUIPart, UIMessage -from .response_types import ( - AbstractSSEChunk, - DoneChunk, +from ._request_types import RequestData +from ._response_types import ( + BaseChunk, ErrorChunk, FinishChunk, ReasoningDeltaChunk, @@ -45,7 +42,7 @@ ToolOutputAvailableChunk, ) -__all__ = ['VercelAIEventStream', 'protocol_messages_to_pai_messages'] +__all__ = ['VercelAIEventStream'] def _json_dumps(obj: Any) -> str: @@ -53,7 +50,7 @@ def _json_dumps(obj: Any) -> str: return to_json(obj).decode('utf-8') -class VercelAIEventStream(BaseEventStream[AbstractSSEChunk | DoneChunk, AgentDepsT]): +class VercelAIEventStream(BaseEventStream[RequestData, BaseChunk, AgentDepsT]): """Transforms Pydantic AI agent events into Vercel AI protocol events. 
This class handles the stateful transformation of streaming agent events @@ -62,31 +59,41 @@ class VercelAIEventStream(BaseEventStream[AbstractSSEChunk | DoneChunk, AgentDep Example: ```python event_stream = VercelAIEventStream() - async for vercel_event in event_stream.agent_event_to_events(pai_event): + async for vercel_event in event_stream.handle_event(pai_event): print(vercel_event.sse()) ``` """ - def __init__(self) -> None: + def __init__(self, request: RequestData) -> None: """Initialize Vercel AI event stream state.""" - super().__init__() - self.new_message_id() # Generate a message ID at initialization + super().__init__(request) self._final_result_tool_id: str | None = None + async def after_stream(self) -> AsyncIterator[BaseChunk]: + """Yield events after agent streaming completes.""" + # Close the final result tool if there was one + if tool_call_id := self._final_result_tool_id: + yield ToolOutputAvailableChunk(tool_call_id=tool_call_id, output=None) + yield FinishChunk() + + async def on_error(self, error: Exception) -> AsyncIterator[BaseChunk]: + """Handle errors during streaming.""" + yield ErrorChunk(error_text=str(error)) + # Granular handlers implementation - async def handle_text_start(self, part: TextPart) -> AsyncIterator[AbstractSSEChunk]: + async def handle_text_start(self, part: TextPart) -> AsyncIterator[BaseChunk]: """Handle a TextPart at start.""" yield TextStartChunk(id=self.message_id or self.new_message_id()) if part.content: yield TextDeltaChunk(id=self.message_id, delta=part.content) - async def handle_text_delta(self, delta: TextPartDelta) -> AsyncIterator[AbstractSSEChunk]: + async def handle_text_delta(self, delta: TextPartDelta) -> AsyncIterator[BaseChunk]: """Handle a TextPartDelta.""" if delta.content_delta: yield TextDeltaChunk(id=self.message_id, delta=delta.content_delta) - async def handle_thinking_start(self, part: ThinkingPart) -> AsyncIterator[AbstractSSEChunk]: + async def handle_thinking_start(self, part: ThinkingPart) -> AsyncIterator[BaseChunk]: """Handle a ThinkingPart at start.""" if not self.message_id: self.new_message_id() @@ -94,12 +101,12 @@ async def handle_thinking_start(self, part: ThinkingPart) -> AsyncIterator[Abstr if part.content: yield ReasoningDeltaChunk(id=self.message_id, delta=part.content) - async def handle_thinking_delta(self, delta: ThinkingPartDelta) -> AsyncIterator[AbstractSSEChunk]: + async def handle_thinking_delta(self, delta: ThinkingPartDelta) -> AsyncIterator[BaseChunk]: """Handle a ThinkingPartDelta.""" if delta.content_delta: yield ReasoningDeltaChunk(id=self.message_id, delta=delta.content_delta) - async def handle_tool_call_start(self, part: ToolCallPart | BuiltinToolCallPart) -> AsyncIterator[AbstractSSEChunk]: + async def handle_tool_call_start(self, part: ToolCallPart | BuiltinToolCallPart) -> AsyncIterator[BaseChunk]: """Handle a ToolCallPart or BuiltinToolCallPart at start.""" yield ToolInputStartChunk(tool_call_id=part.tool_call_id, tool_name=part.tool_name) if isinstance(part.args, str): @@ -107,7 +114,10 @@ async def handle_tool_call_start(self, part: ToolCallPart | BuiltinToolCallPart) elif part.args is not None: yield ToolInputDeltaChunk(tool_call_id=part.tool_call_id, input_text_delta=_json_dumps(part.args)) - async def handle_tool_call_delta(self, delta: ToolCallPartDelta) -> AsyncIterator[AbstractSSEChunk]: + def handle_builtin_tool_call_start(self, part: BuiltinToolCallPart) -> AsyncIterator[BaseChunk]: + return self.handle_tool_call_start(part) + + async def handle_tool_call_delta(self, 
delta: ToolCallPartDelta) -> AsyncIterator[BaseChunk]: """Handle a ToolCallPartDelta.""" tool_call_id = delta.tool_call_id or '' if isinstance(delta.args_delta, str): @@ -115,11 +125,11 @@ async def handle_tool_call_delta(self, delta: ToolCallPartDelta) -> AsyncIterato elif delta.args_delta is not None: yield ToolInputDeltaChunk(tool_call_id=tool_call_id, input_text_delta=_json_dumps(delta.args_delta)) - async def handle_builtin_tool_return(self, part: BuiltinToolReturnPart) -> AsyncIterator[AbstractSSEChunk]: + async def handle_builtin_tool_return(self, part: BuiltinToolReturnPart) -> AsyncIterator[BaseChunk]: """Handle a BuiltinToolReturnPart.""" yield ToolOutputAvailableChunk(tool_call_id=part.tool_call_id, output=part.content) - async def handle_function_tool_call(self, event: FunctionToolCallEvent) -> AsyncIterator[AbstractSSEChunk]: + async def handle_function_tool_call(self, event: FunctionToolCallEvent) -> AsyncIterator[BaseChunk]: """Handle a FunctionToolCallEvent. No Vercel AI events are emitted at this stage since tool calls are handled in PartStartEvent. @@ -127,7 +137,7 @@ async def handle_function_tool_call(self, event: FunctionToolCallEvent) -> Async return yield # Make this an async generator - async def handle_function_tool_result(self, event: FunctionToolResultEvent) -> AsyncIterator[AbstractSSEChunk]: + async def handle_function_tool_result(self, event: FunctionToolResultEvent) -> AsyncIterator[BaseChunk]: """Handle a FunctionToolResultEvent, emitting tool result events.""" result = event.result if isinstance(result, ToolReturnPart): @@ -136,7 +146,7 @@ async def handle_function_tool_result(self, event: FunctionToolResultEvent) -> A # For retry prompts, emit the error content as tool output yield ToolOutputAvailableChunk(tool_call_id=result.tool_call_id, output=result.content) - async def handle_builtin_tool_call(self, event: BuiltinToolCallEvent) -> AsyncIterator[AbstractSSEChunk]: # type: ignore[reportDeprecated] + async def handle_builtin_tool_call(self, event: BuiltinToolCallEvent) -> AsyncIterator[BaseChunk]: # type: ignore[reportDeprecated] """Handle a BuiltinToolCallEvent, emitting tool input events.""" part = event.part yield ToolInputStartChunk(tool_call_id=part.tool_call_id, tool_name=part.tool_name) @@ -145,91 +155,14 @@ async def handle_builtin_tool_call(self, event: BuiltinToolCallEvent) -> AsyncIt elif part.args is not None: yield ToolInputDeltaChunk(tool_call_id=part.tool_call_id, input_text_delta=_json_dumps(part.args)) - async def handle_builtin_tool_result(self, event: BuiltinToolResultEvent) -> AsyncIterator[AbstractSSEChunk]: # type: ignore[reportDeprecated] + async def handle_builtin_tool_result(self, event: BuiltinToolResultEvent) -> AsyncIterator[BaseChunk]: # type: ignore[reportDeprecated] """Handle a BuiltinToolResultEvent, emitting tool output events.""" result = event.result yield ToolOutputAvailableChunk(tool_call_id=result.tool_call_id, output=result.content) - async def handle_final_result(self, event: FinalResultEvent) -> AsyncIterator[AbstractSSEChunk]: + async def handle_final_result(self, event: FinalResultEvent) -> AsyncIterator[BaseChunk]: """Handle a FinalResultEvent, tracking the final result tool.""" if event.tool_call_id and event.tool_name: self._final_result_tool_id = event.tool_call_id + # TODO (DouweM): Stream output tool result once it's ready yield ToolInputStartChunk(tool_call_id=event.tool_call_id, tool_name=event.tool_name) - - async def handle_run_result(self, event: AgentRunResultEvent) -> 
AsyncIterator[AbstractSSEChunk]: - """Handle an AgentRunResultEvent. - - No additional Vercel AI events are emitted at this stage. - """ - return - yield # Make this an async generator - - async def after_stream(self) -> AsyncIterator[AbstractSSEChunk | DoneChunk]: - """Yield events after agent streaming completes.""" - # Close the final result tool if there was one - if tool_call_id := self._final_result_tool_id: - yield ToolOutputAvailableChunk(tool_call_id=tool_call_id, output=None) - yield FinishChunk() - yield DoneChunk() - - async def on_validation_error(self, error: Exception) -> AsyncIterator[AbstractSSEChunk]: - """Handle validation errors before stream starts.""" - yield ErrorChunk(error_text=str(error)) - - async def on_stream_error(self, error: Exception) -> AsyncIterator[AbstractSSEChunk]: - """Handle errors during streaming.""" - yield ErrorChunk(error_text=str(error)) - - -def protocol_messages_to_pai_messages(messages: list[UIMessage]) -> list[ModelMessage]: - """Convert Vercel AI protocol messages to Pydantic AI messages. - - Args: - messages: List of Vercel AI UIMessage objects. - - Returns: - List of Pydantic AI ModelMessage objects. - - Raises: - ValueError: If message format is not supported. - """ - from ...messages import ModelRequest, ModelResponse, SystemPromptPart, TextPart, UserPromptPart - - pai_messages: list[ModelMessage] = [] - - for msg in messages: - if msg.role == 'user': - # User message - extract text from parts - texts: list[str] = [] - for part in msg.parts: - if isinstance(part, TextUIPart): - texts.append(part.text) - else: - raise ValueError(f'Only text parts are supported for user messages, got {type(part).__name__}') - - if texts: - pai_messages.append(ModelRequest(parts=[UserPromptPart(content='\n'.join(texts))])) - - elif msg.role == 'assistant': - # Assistant message - for now, just extract text - # Full reconstruction of ModelResponse with tool calls would require more complex logic - texts: list[str] = [] - for part in msg.parts: - if isinstance(part, TextUIPart): - texts.append(part.text) - # TODO: Handle ToolOutputAvailablePart for full message history reconstruction - - if texts: - pai_messages.append(ModelResponse(parts=[TextPart(content='\n'.join(texts))])) - - elif msg.role == 'system': - # System message - not in standard Vercel AI protocol but might be custom - texts: list[str] = [] - for part in msg.parts: - if isinstance(part, TextUIPart): - texts.append(part.text) - - if texts: - pai_messages.append(ModelRequest(parts=[SystemPromptPart(content='\n'.join(texts))])) - - return pai_messages diff --git a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/request_types.py b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_request_types.py similarity index 100% rename from pydantic_ai_slim/pydantic_ai/ui/vercel_ai/request_types.py rename to pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_request_types.py diff --git a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/response_types.py b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_response_types.py similarity index 74% rename from pydantic_ai_slim/pydantic_ai/ui/vercel_ai/response_types.py rename to pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_response_types.py index 09b82c3a99..c2d6ab2055 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/response_types.py +++ b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_response_types.py @@ -9,7 +9,7 @@ from ._utils import CamelBaseModel, ProviderMetadata __all__ = [ - 'AbstractSSEChunk', + 'BaseChunk', 'TextStartChunk', 'TextDeltaChunk', 'TextEndChunk', @@ -33,19 +33,14 @@ 
'FinishChunk', 'AbortChunk', 'MessageMetadataChunk', - 'DoneChunk', ] -class AbstractSSEChunk(CamelBaseModel): +class BaseChunk(CamelBaseModel): """Abstract base class for response SSE events.""" - def sse(self) -> str: - """Encode as SSE format.""" - return self.model_dump_json(exclude_none=True, by_alias=True) - -class TextStartChunk(AbstractSSEChunk): +class TextStartChunk(BaseChunk): """Text start chunk.""" type: Literal['text-start'] = 'text-start' @@ -53,7 +48,7 @@ class TextStartChunk(AbstractSSEChunk): provider_metadata: ProviderMetadata | None = None -class TextDeltaChunk(AbstractSSEChunk): +class TextDeltaChunk(BaseChunk): """Text delta chunk.""" type: Literal['text-delta'] = 'text-delta' @@ -62,7 +57,7 @@ class TextDeltaChunk(AbstractSSEChunk): provider_metadata: ProviderMetadata | None = None -class TextEndChunk(AbstractSSEChunk): +class TextEndChunk(BaseChunk): """Text end chunk.""" type: Literal['text-end'] = 'text-end' @@ -70,7 +65,7 @@ class TextEndChunk(AbstractSSEChunk): provider_metadata: ProviderMetadata | None = None -class ReasoningStartChunk(AbstractSSEChunk): +class ReasoningStartChunk(BaseChunk): """Reasoning start chunk.""" type: Literal['reasoning-start'] = 'reasoning-start' @@ -78,7 +73,7 @@ class ReasoningStartChunk(AbstractSSEChunk): provider_metadata: ProviderMetadata | None = None -class ReasoningDeltaChunk(AbstractSSEChunk): +class ReasoningDeltaChunk(BaseChunk): """Reasoning delta chunk.""" type: Literal['reasoning-delta'] = 'reasoning-delta' @@ -87,7 +82,7 @@ class ReasoningDeltaChunk(AbstractSSEChunk): provider_metadata: ProviderMetadata | None = None -class ReasoningEndChunk(AbstractSSEChunk): +class ReasoningEndChunk(BaseChunk): """Reasoning end chunk.""" type: Literal['reasoning-end'] = 'reasoning-end' @@ -95,14 +90,14 @@ class ReasoningEndChunk(AbstractSSEChunk): provider_metadata: ProviderMetadata | None = None -class ErrorChunk(AbstractSSEChunk): +class ErrorChunk(BaseChunk): """Error chunk.""" type: Literal['error'] = 'error' error_text: str -class ToolInputStartChunk(AbstractSSEChunk): +class ToolInputStartChunk(BaseChunk): """Tool input start chunk.""" type: Literal['tool-input-start'] = 'tool-input-start' @@ -112,7 +107,7 @@ class ToolInputStartChunk(AbstractSSEChunk): dynamic: bool | None = None -class ToolInputDeltaChunk(AbstractSSEChunk): +class ToolInputDeltaChunk(BaseChunk): """Tool input delta chunk.""" type: Literal['tool-input-delta'] = 'tool-input-delta' @@ -120,7 +115,7 @@ class ToolInputDeltaChunk(AbstractSSEChunk): input_text_delta: str -class ToolOutputAvailableChunk(AbstractSSEChunk): +class ToolOutputAvailableChunk(BaseChunk): """Tool output available chunk.""" type: Literal['tool-output-available'] = 'tool-output-available' @@ -131,14 +126,14 @@ class ToolOutputAvailableChunk(AbstractSSEChunk): preliminary: bool | None = None -class FinishChunk(AbstractSSEChunk): +class FinishChunk(BaseChunk): """Finish chunk.""" type: Literal['finish'] = 'finish' message_metadata: Any | None = None -class ToolInputAvailableChunk(AbstractSSEChunk): +class ToolInputAvailableChunk(BaseChunk): """Tool input available chunk.""" type: Literal['tool-input-available'] = 'tool-input-available' @@ -150,7 +145,7 @@ class ToolInputAvailableChunk(AbstractSSEChunk): dynamic: bool | None = None -class ToolInputErrorChunk(AbstractSSEChunk): +class ToolInputErrorChunk(BaseChunk): """Tool input error chunk.""" type: Literal['tool-input-error'] = 'tool-input-error' @@ -163,7 +158,7 @@ class ToolInputErrorChunk(AbstractSSEChunk): error_text: str -class 
ToolOutputErrorChunk(AbstractSSEChunk): +class ToolOutputErrorChunk(BaseChunk): """Tool output error chunk.""" type: Literal['tool-output-error'] = 'tool-output-error' @@ -173,7 +168,7 @@ class ToolOutputErrorChunk(AbstractSSEChunk): dynamic: bool | None = None -class SourceUrlChunk(AbstractSSEChunk): +class SourceUrlChunk(BaseChunk): """Source URL chunk.""" type: Literal['source-url'] = 'source-url' @@ -183,7 +178,7 @@ class SourceUrlChunk(AbstractSSEChunk): provider_metadata: ProviderMetadata | None = None -class SourceDocumentChunk(AbstractSSEChunk): +class SourceDocumentChunk(BaseChunk): """Source document chunk.""" type: Literal['source-document'] = 'source-document' @@ -194,7 +189,7 @@ class SourceDocumentChunk(AbstractSSEChunk): provider_metadata: ProviderMetadata | None = None -class FileChunk(AbstractSSEChunk): +class FileChunk(BaseChunk): """File chunk.""" type: Literal['file'] = 'file' @@ -202,26 +197,26 @@ class FileChunk(AbstractSSEChunk): media_type: str -class DataUIMessageChunk(AbstractSSEChunk): +class DataUIMessageChunk(BaseChunk): """Data UI message chunk with dynamic type.""" type: str # Will be f"data-{NAME}" data: Any -class StartStepChunk(AbstractSSEChunk): +class StartStepChunk(BaseChunk): """Start step chunk.""" type: Literal['start-step'] = 'start-step' -class FinishStepChunk(AbstractSSEChunk): +class FinishStepChunk(BaseChunk): """Finish step chunk.""" type: Literal['finish-step'] = 'finish-step' -class StartChunk(AbstractSSEChunk): +class StartChunk(BaseChunk): """Start chunk.""" type: Literal['start'] = 'start' @@ -229,28 +224,14 @@ class StartChunk(AbstractSSEChunk): message_metadata: Any | None = None -class AbortChunk(AbstractSSEChunk): +class AbortChunk(BaseChunk): """Abort chunk.""" type: Literal['abort'] = 'abort' -class MessageMetadataChunk(AbstractSSEChunk): +class MessageMetadataChunk(BaseChunk): """Message metadata chunk.""" type: Literal['message-metadata'] = 'message-metadata' message_metadata: Any - - -class DoneChunk: - """Special marker chunk to indicate the end of the SSE stream.""" - - def sse(self) -> str: - """Encode as SSE done marker.""" - return '[DONE]' - - def __str__(self) -> str: - return 'DoneChunk' - - def __eq__(self, other: Any) -> bool: - return isinstance(other, DoneChunk) diff --git a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/adapter.py b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/adapter.py deleted file mode 100644 index c38b9e365c..0000000000 --- a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/adapter.py +++ /dev/null @@ -1,149 +0,0 @@ -"""Vercel AI adapter for handling requests.""" - -# pyright: reportGeneralTypeIssues=false - -from __future__ import annotations - -from collections.abc import AsyncIterator -from dataclasses import dataclass -from typing import TYPE_CHECKING, Any - -from ...tools import AgentDepsT -from .event_stream import VercelAIEventStream -from .request_types import RequestData, TextUIPart, UIMessage -from .response_types import AbstractSSEChunk, DoneChunk - -if TYPE_CHECKING: - from ...agent import Agent - -__all__ = ['VercelAIAdapter'] - - -@dataclass -class VercelAIAdapter: - """Adapter for handling Vercel AI protocol requests with Pydantic AI agents. - - This adapter provides a simplified interface for integrating Pydantic AI agents - with the Vercel AI protocol, handling request parsing, message conversion, - and event streaming. 
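
With `DoneChunk` and the per-chunk `sse()` method removed, every response type is a plain `CamelBaseModel`, and the SSE framing now lives in `VercelAIAdapter.encode_event`. A sketch of the resulting wire format, with the expected output shown as a comment (field casing matches the camelCase aliases asserted in the tests below):

```python
from pydantic_ai.ui.vercel_ai import ToolInputStartChunk

chunk = ToolInputStartChunk(tool_call_id='call_1', tool_name='web_search')
sse_line = f'data: {chunk.model_dump_json(by_alias=True, exclude_none=True)}\n\n'
# data: {"type":"tool-input-start","toolCallId":"call_1","toolName":"web_search"}
```
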
- - Example: - ```python - from pydantic_ai import Agent - from pydantic_ai.ui.vercel_ai import VercelAIAdapter - - agent = Agent('openai:gpt-4') - adapter = VercelAIAdapter(agent) - - async def handle_request(data: RequestData, deps=None): - async for chunk in adapter.run_stream(data, deps): - yield chunk.sse() - ``` - """ - - agent: Agent[AgentDepsT] - """The Pydantic AI agent to run.""" - - async def run_stream( - self, request: RequestData, deps: AgentDepsT | None = None - ) -> AsyncIterator[AbstractSSEChunk | DoneChunk]: - """Stream events from an agent run as Vercel AI protocol events. - - Args: - request: The Vercel AI request data. - deps: Optional dependencies to pass to the agent. - - Yields: - Vercel AI protocol events (AbstractSSEChunk or DoneChunk). - - Raises: - ValueError: If request validation fails. - """ - # Parse messages from request - messages = self.parse_request_messages(request) - - # Extract prompt from last user message (for now, simple implementation) - if not messages: - raise ValueError('no messages provided') - - message = messages[-1] - prompt_parts: list[str] = [] - for part in message.parts: - if isinstance(part, TextUIPart): - prompt_parts.append(part.text) - else: - raise ValueError(f'Only text parts are supported yet, got {part}') - - # Create event stream - event_stream = self.create_event_stream() - - # Run agent and stream events - async for event in self.agent.run_stream_events('\n'.join(prompt_parts), deps=deps): - async for chunk in event_stream.agent_event_to_events(event): - yield chunk - - # Emit after-stream events - async for chunk in event_stream.after_stream(): - yield chunk - - def create_event_stream(self) -> VercelAIEventStream[AgentDepsT]: - """Create a new Vercel AI event stream. - - Returns: - A VercelAIEventStream instance. - """ - return VercelAIEventStream[AgentDepsT]() - - def parse_request_messages(self, request: RequestData) -> list[UIMessage]: - """Extract messages from the Vercel AI request. - - Args: - request: The Vercel AI request data. - - Returns: - List of UIMessage objects. - """ - return request.messages - - async def dispatch_request(self, request: Any, deps: AgentDepsT | None = None) -> Any: - """Handle a request and return a streamed SSE response. - - Args: - request: The incoming Starlette/FastAPI request. - deps: The dependencies for the agent. - - Returns: - A streamed SSE response. 
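
The deleted instance-level `dispatch_request` above is superseded by the classmethod on the new `VercelAIAdapter`; a sketch of mounting it in a FastAPI app (the route path and model name are illustrative):

```python
import fastapi
from starlette.requests import Request
from starlette.responses import Response

from pydantic_ai import Agent
from pydantic_ai.ui.vercel_ai import VercelAIAdapter

agent = Agent('openai:gpt-5', instructions='You are a helpful assistant.')
app = fastapi.FastAPI()


@app.post('/chat')
async def chat(request: Request) -> Response:
    # Validates the Vercel AI request body, runs the agent, and returns a
    # streaming SSE response with the Vercel AI headers already set.
    return await VercelAIAdapter.dispatch_request(agent, request)
```
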
- """ - try: - from starlette.requests import Request - from starlette.responses import JSONResponse - except ImportError as e: # pragma: no cover - raise ImportError('Please install starlette to use dispatch_request') from e - - try: - from sse_starlette.sse import EventSourceResponse - except ImportError as e: # pragma: no cover - raise ImportError('Please install sse_starlette to use dispatch_request') from e - - from pydantic import ValidationError - - if not isinstance(request, Request): # pragma: no cover - raise TypeError(f'Expected Starlette Request, got {type(request).__name__}') - - from .request_types import request_data_ta - - try: - data = request_data_ta.validate_json(await request.json()) - - async def run_sse() -> AsyncIterator[str]: - async for chunk in self.run_stream(data, deps=deps): - yield chunk.sse() - - from ._utils import VERCEL_AI_DSP_HEADERS - - return EventSourceResponse(run_sse(), headers=VERCEL_AI_DSP_HEADERS) - except ValidationError as e: - return JSONResponse({'errors': e.errors()}, status_code=422) - except Exception as e: - return JSONResponse({'errors': str(e)}, status_code=500) diff --git a/tests/test_ag_ui.py b/tests/test_ag_ui.py index bcb57d9158..e7e0be7746 100644 --- a/tests/test_ag_ui.py +++ b/tests/test_ag_ui.py @@ -48,9 +48,7 @@ from .conftest import IsDatetime, IsSameStr, try_import -has_ag_ui: bool = False with try_import() as imports_successful: - has_ag_ui = imports_successful() from ag_ui.core import ( AssistantMessage, CustomEvent, @@ -70,16 +68,16 @@ from pydantic_ai.ag_ui import ( SSE_CONTENT_TYPE, + AGUIAdapter, OnCompleteFunc, StateDeps, run_ag_ui, ) - from pydantic_ai.ui.ag_ui.event_stream import protocol_messages_to_pai_messages pytestmark = [ pytest.mark.anyio, - pytest.mark.skipif(not imports_successful, reason='ag-ui-protocol not installed'), + pytest.mark.skipif(not imports_successful(), reason='ag-ui-protocol not installed'), pytest.mark.filterwarnings( 'ignore:`BuiltinToolCallEvent` is deprecated, look for `PartStartEvent` and `PartDeltaEvent` with `BuiltinToolCallPart` instead.:DeprecationWarning' ), @@ -257,7 +255,7 @@ async def stream_function( 'threadId': IsStr(), 'runId': IsStr(), }, - {'type': 'RUN_ERROR', 'message': 'no messages found in the input', 'code': 'no_messages'}, + {'type': 'RUN_ERROR', 'message': 'No messages provided', 'code': 'UserError'}, ] ) @@ -1097,7 +1095,7 @@ async def store_state( agent: Agent[StateDeps[StateInt], str] = Agent( model=FunctionModel(stream_function=simple_stream), - deps_type=StateDeps[StateInt], # type: ignore[reportUnknownArgumentType] + deps_type=StateDeps[StateInt], prepare_tools=store_state, ) @@ -1197,7 +1195,7 @@ async def test_concurrent_runs() -> None: agent: Agent[StateDeps[StateInt], str] = Agent( model=TestModel(), - deps_type=StateDeps[StateInt], # type: ignore[reportUnknownArgumentType] + deps_type=StateDeps[StateInt], ) @agent.tool @@ -1342,30 +1340,7 @@ async def async_callback(run_result: AgentRunResult[Any]) -> None: assert events[-1]['type'] == 'RUN_FINISHED' -async def test_callback_with_error() -> None: - """Test that callbacks are not called when errors occur.""" - - captured_results: list[AgentRunResult[Any]] = [] - - def error_callback(run_result: AgentRunResult[Any]) -> None: - captured_results.append(run_result) # pragma: no cover - - agent = Agent(TestModel()) - # Empty messages should cause an error - run_input = create_input() # No messages will cause _NoMessagesError - - events = await run_and_collect_events(agent, run_input, on_complete=error_callback) - - 
# Verify callback was not called due to error - assert len(captured_results) == 0 - - # Verify error event was sent - assert len(events) > 0 - assert events[0]['type'] == 'RUN_STARTED' - assert any(event['type'] == 'RUN_ERROR' for event in events) - - -async def test_protocol_messages_to_pai_messages() -> None: +async def test_messages() -> None: messages = [ SystemMessage( id='msg_1', @@ -1448,7 +1423,20 @@ async def test_protocol_messages_to_pai_messages() -> None: ), ] - assert protocol_messages_to_pai_messages(messages) == snapshot( + adapter = AGUIAdapter( + Agent(TestModel()), + # TODO (DouweM): Have a convenience method so the useless args aren't necessary + request=RunAgentInput( + messages=messages, + thread_id='test_thread', + run_id='test_run', + state={}, + tools=[], + context=[], + forwarded_props={}, + ), + ) + assert adapter.messages == snapshot( [ ModelRequest( parts=[ diff --git a/tests/test_vercel_ai.py b/tests/test_vercel_ai.py index be55bcccec..93109970a6 100644 --- a/tests/test_vercel_ai.py +++ b/tests/test_vercel_ai.py @@ -1,5 +1,7 @@ from __future__ import annotations +import json + import pytest from inline_snapshot import snapshot @@ -8,23 +10,12 @@ from pydantic_ai.models.openai import OpenAIResponsesModel from pydantic_ai.providers.openai import OpenAIProvider from pydantic_ai.ui.vercel_ai import VercelAIAdapter -from pydantic_ai.ui.vercel_ai.request_types import ( +from pydantic_ai.ui.vercel_ai._request_types import ( SubmitMessage, TextUIPart, ToolOutputAvailablePart, UIMessage, ) -from pydantic_ai.ui.vercel_ai.response_types import ( - DoneChunk, - FinishChunk, - ReasoningDeltaChunk, - ReasoningStartChunk, - TextDeltaChunk, - TextStartChunk, - ToolInputDeltaChunk, - ToolInputStartChunk, - ToolOutputAvailableChunk, -) from .conftest import IsStr @@ -43,7 +34,6 @@ async def test_run(allow_model_requests: None, openai_api_key: str): model = OpenAIResponsesModel('gpt-5', provider=OpenAIProvider(api_key=openai_api_key)) agent = Agent(model=model, builtin_tools=[WebSearchTool()]) - adapter = VercelAIAdapter(agent) data = SubmitMessage( trigger='submit-message', @@ -152,670 +142,713 @@ async def test_run(allow_model_requests: None, openai_api_key: str): ], ) - events = [event async for event in adapter.run_stream(data, None)] + adapter = VercelAIAdapter(agent, request=data) + events = [json.loads(event.removeprefix('data: ')) async for event in adapter.encode_stream(adapter.run_stream())] assert events == snapshot( [ - ReasoningStartChunk(id='d775971d84c848228275a25a097b6409'), - ReasoningDeltaChunk(id='d775971d84c848228275a25a097b6409', delta=''), - ToolInputStartChunk( - tool_call_id='ws_00e767404995b9950068e647f909248191bfe8d05eeed67645', tool_name='web_search' - ), - ToolInputDeltaChunk( - tool_call_id='ws_00e767404995b9950068e647f909248191bfe8d05eeed67645', - input_text_delta='{"query":"OpenTelemetry FastAPI instrumentation capture request and response body","type":"search"}', - ), - ToolOutputAvailableChunk( - tool_call_id='ws_00e767404995b9950068e647f909248191bfe8d05eeed67645', output={'status': 'completed'} - ), - ReasoningStartChunk(id='d775971d84c848228275a25a097b6409'), - ReasoningDeltaChunk(id='d775971d84c848228275a25a097b6409', delta=''), - ToolInputStartChunk( - tool_call_id='ws_00e767404995b9950068e647fb73c48191b0bdb147c3a0d22c', tool_name='web_search' - ), - ToolInputDeltaChunk( - tool_call_id='ws_00e767404995b9950068e647fb73c48191b0bdb147c3a0d22c', - input_text_delta='{"query":"OTEL_INSTRUMENTATION_HTTP_CAPTURE_BODY Python","type":"search"}', - ), - 
ToolOutputAvailableChunk( - tool_call_id='ws_00e767404995b9950068e647fb73c48191b0bdb147c3a0d22c', output={'status': 'completed'} - ), - ReasoningStartChunk(id='d775971d84c848228275a25a097b6409'), - ReasoningDeltaChunk(id='d775971d84c848228275a25a097b6409', delta=''), - ToolInputStartChunk( - tool_call_id='ws_00e767404995b9950068e647fee97c8191919865e0c0a78bba', tool_name='web_search' - ), - ToolInputDeltaChunk( - tool_call_id='ws_00e767404995b9950068e647fee97c8191919865e0c0a78bba', - input_text_delta='{"query":"OTEL_INSTRUMENTATION_HTTP_CAPTURE_BODY opentelemetry python","type":"search"}', - ), - ToolOutputAvailableChunk( - tool_call_id='ws_00e767404995b9950068e647fee97c8191919865e0c0a78bba', output={'status': 'completed'} - ), - ReasoningStartChunk(id='d775971d84c848228275a25a097b6409'), - ReasoningDeltaChunk(id='d775971d84c848228275a25a097b6409', delta=''), - ToolInputStartChunk( - tool_call_id='ws_00e767404995b9950068e64803f27c81918a39ce50cb8dfbc2', tool_name='web_search' - ), - ToolInputDeltaChunk( - tool_call_id='ws_00e767404995b9950068e64803f27c81918a39ce50cb8dfbc2', - input_text_delta='{"query":"site:github.com open-telemetry/opentelemetry-python-contrib OTEL_INSTRUMENTATION_HTTP_CAPTURE_BODY","type":"search"}', - ), - ToolOutputAvailableChunk( - tool_call_id='ws_00e767404995b9950068e64803f27c81918a39ce50cb8dfbc2', output={'status': 'completed'} - ), - ReasoningStartChunk(id='d775971d84c848228275a25a097b6409'), - ReasoningDeltaChunk(id='d775971d84c848228275a25a097b6409', delta=''), - ToolInputStartChunk( - tool_call_id='ws_00e767404995b9950068e6480ac0888191a7897231e6ca9911', tool_name='web_search' - ), - ToolInputDeltaChunk( - tool_call_id='ws_00e767404995b9950068e6480ac0888191a7897231e6ca9911', - input_text_delta='{"query":null,"type":"search"}', - ), - ToolOutputAvailableChunk( - tool_call_id='ws_00e767404995b9950068e6480ac0888191a7897231e6ca9911', output={'status': 'completed'} - ), - ReasoningStartChunk(id='d775971d84c848228275a25a097b6409'), - ReasoningDeltaChunk(id='d775971d84c848228275a25a097b6409', delta=''), - ToolInputStartChunk( - tool_call_id='ws_00e767404995b9950068e6480e11208191834104e1aaab1148', tool_name='web_search' - ), - ToolInputDeltaChunk( - tool_call_id='ws_00e767404995b9950068e6480e11208191834104e1aaab1148', - input_text_delta='{"query":null,"type":"search"}', - ), - ToolOutputAvailableChunk( - tool_call_id='ws_00e767404995b9950068e6480e11208191834104e1aaab1148', output={'status': 'completed'} - ), - ReasoningStartChunk(id='d775971d84c848228275a25a097b6409'), - ReasoningDeltaChunk(id='d775971d84c848228275a25a097b6409', delta=''), - ToolInputStartChunk( - tool_call_id='ws_00e767404995b9950068e648118bf88191aa7f804637c45b32', tool_name='web_search' - ), - ToolInputDeltaChunk( - tool_call_id='ws_00e767404995b9950068e648118bf88191aa7f804637c45b32', - input_text_delta='{"query":"OTEL_PYTHON_LOG_CORRELATION environment variable","type":"search"}', - ), - ToolOutputAvailableChunk( - tool_call_id='ws_00e767404995b9950068e648118bf88191aa7f804637c45b32', output={'status': 'completed'} - ), - ReasoningStartChunk(id='d775971d84c848228275a25a097b6409'), - ReasoningDeltaChunk(id='d775971d84c848228275a25a097b6409', delta=''), - TextStartChunk(id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk( - delta="""\ + {'type': 'reasoning-start', 'id': IsStr()}, + { + 'type': 'tool-input-start', + 'toolCallId': IsStr(), + 'toolName': 'web_search', + }, + { + 'type': 'tool-input-delta', + 'toolCallId': IsStr(), + 'inputTextDelta': '{"query":"OpenTelemetry FastAPI instrumentation 
capture request and response body","type":"search"}', + }, + { + 'type': 'tool-output-available', + 'toolCallId': IsStr(), + 'output': {'status': 'completed'}, + }, + {'type': 'reasoning-start', 'id': IsStr()}, + { + 'type': 'tool-input-start', + 'toolCallId': IsStr(), + 'toolName': 'web_search', + }, + { + 'type': 'tool-input-delta', + 'toolCallId': IsStr(), + 'inputTextDelta': '{"query":"OTEL_INSTRUMENTATION_HTTP_CAPTURE_BODY Python","type":"search"}', + }, + { + 'type': 'tool-output-available', + 'toolCallId': IsStr(), + 'output': {'status': 'completed'}, + }, + {'type': 'reasoning-start', 'id': IsStr()}, + { + 'type': 'tool-input-start', + 'toolCallId': IsStr(), + 'toolName': 'web_search', + }, + { + 'type': 'tool-input-delta', + 'toolCallId': IsStr(), + 'inputTextDelta': '{"query":"OTEL_INSTRUMENTATION_HTTP_CAPTURE_BODY opentelemetry python","type":"search"}', + }, + { + 'type': 'tool-output-available', + 'toolCallId': IsStr(), + 'output': {'status': 'completed'}, + }, + {'type': 'reasoning-start', 'id': IsStr()}, + { + 'type': 'tool-input-start', + 'toolCallId': IsStr(), + 'toolName': 'web_search', + }, + { + 'type': 'tool-input-delta', + 'toolCallId': IsStr(), + 'inputTextDelta': '{"query":"site:github.com open-telemetry/opentelemetry-python-contrib OTEL_INSTRUMENTATION_HTTP_CAPTURE_BODY","type":"search"}', + }, + { + 'type': 'tool-output-available', + 'toolCallId': IsStr(), + 'output': {'status': 'completed'}, + }, + {'type': 'reasoning-start', 'id': IsStr()}, + { + 'type': 'tool-input-start', + 'toolCallId': IsStr(), + 'toolName': 'web_search', + }, + { + 'type': 'tool-input-delta', + 'toolCallId': IsStr(), + 'inputTextDelta': '{"query":null,"type":"search"}', + }, + { + 'type': 'tool-output-available', + 'toolCallId': IsStr(), + 'output': {'status': 'completed'}, + }, + {'type': 'reasoning-start', 'id': IsStr()}, + { + 'type': 'tool-input-start', + 'toolCallId': IsStr(), + 'toolName': 'web_search', + }, + { + 'type': 'tool-input-delta', + 'toolCallId': IsStr(), + 'inputTextDelta': '{"query":null,"type":"search"}', + }, + { + 'type': 'tool-output-available', + 'toolCallId': IsStr(), + 'output': {'status': 'completed'}, + }, + {'type': 'reasoning-start', 'id': IsStr()}, + { + 'type': 'tool-input-start', + 'toolCallId': IsStr(), + 'toolName': 'web_search', + }, + { + 'type': 'tool-input-delta', + 'toolCallId': IsStr(), + 'inputTextDelta': '{"query":"OTEL_PYTHON_LOG_CORRELATION environment variable","type":"search"}', + }, + { + 'type': 'tool-output-available', + 'toolCallId': IsStr(), + 'output': {'status': 'completed'}, + }, + {'type': 'reasoning-start', 'id': IsStr()}, + {'type': 'text-start', 'id': IsStr()}, + { + 'type': 'text-delta', + 'delta': """\ Short answer: - Default\ """, - id='d775971d84c848228275a25a097b6409', - ), - TextDeltaChunk(delta=' FastAPI/OpenTelemetry', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta=' instrumentation already records method', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta='/route/status', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk( - delta="""\ + 'id': IsStr(), + }, + {'type': 'text-delta', 'delta': ' FastAPI/OpenTelemetry', 'id': IsStr()}, + { + 'type': 'text-delta', + 'delta': ' instrumentation already records method', + 'id': IsStr(), + }, + {'type': 'text-delta', 'delta': '/route/status', 'id': IsStr()}, + { + 'type': 'text-delta', + 'delta': """\ . 
- To also\ """, - id='d775971d84c848228275a25a097b6409', - ), - TextDeltaChunk(delta=' include HTTP headers', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta=', set', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta=' the capture-', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta='headers env', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk( - delta="""\ + 'id': IsStr(), + }, + {'type': 'text-delta', 'delta': ' include HTTP headers', 'id': IsStr()}, + {'type': 'text-delta', 'delta': ', set', 'id': IsStr()}, + {'type': 'text-delta', 'delta': ' the capture-', 'id': IsStr()}, + {'type': 'text-delta', 'delta': 'headers env', 'id': IsStr()}, + { + 'type': 'text-delta', + 'delta': """\ vars. -\ """, - id='d775971d84c848228275a25a097b6409', - ), - TextDeltaChunk(delta=' To include request', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta='/response bodies', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta=', use the', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta=' FastAPI', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta='/ASGI', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta=' request/response', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta=' hooks and add', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta=' the', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta=' payload to', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta=' the span yourself', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta=' (with red', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta='action/size', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk( - delta="""\ + 'id': IsStr(), + }, + {'type': 'text-delta', 'delta': ' To include request', 'id': IsStr()}, + {'type': 'text-delta', 'delta': '/response bodies', 'id': IsStr()}, + {'type': 'text-delta', 'delta': ', use the', 'id': IsStr()}, + {'type': 'text-delta', 'delta': ' FastAPI', 'id': IsStr()}, + {'type': 'text-delta', 'delta': '/ASGI', 'id': IsStr()}, + {'type': 'text-delta', 'delta': ' request/response', 'id': IsStr()}, + {'type': 'text-delta', 'delta': ' hooks and add', 'id': IsStr()}, + {'type': 'text-delta', 'delta': ' the', 'id': IsStr()}, + {'type': 'text-delta', 'delta': ' payload to', 'id': IsStr()}, + {'type': 'text-delta', 'delta': ' the span yourself', 'id': IsStr()}, + {'type': 'text-delta', 'delta': ' (with red', 'id': IsStr()}, + {'type': 'text-delta', 'delta': 'action/size', 'id': IsStr()}, + { + 'type': 'text-delta', + 'delta': """\ limits). 
How\ """, - id='d775971d84c848228275a25a097b6409', - ), - TextDeltaChunk(delta=' to do it', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk( - delta="""\ + 'id': IsStr(), + }, + {'type': 'text-delta', 'delta': ' to do it', 'id': IsStr()}, + { + 'type': 'text-delta', + 'delta': """\ 1)\ """, - id='d775971d84c848228275a25a097b6409', - ), - TextDeltaChunk(delta=' Enable header capture', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta=' (server side', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk( - delta="""\ + 'id': IsStr(), + }, + {'type': 'text-delta', 'delta': ' Enable header capture', 'id': IsStr()}, + {'type': 'text-delta', 'delta': ' (server side', 'id': IsStr()}, + { + 'type': 'text-delta', + 'delta': """\ ) - Choose\ """, - id='d775971d84c848228275a25a097b6409', - ), - TextDeltaChunk(delta=' just the', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta=' headers you need; avoid', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta=' sensitive ones or sanitize', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk( - delta="""\ + 'id': IsStr(), + }, + {'type': 'text-delta', 'delta': ' just the', 'id': IsStr()}, + {'type': 'text-delta', 'delta': ' headers you need; avoid', 'id': IsStr()}, + { + 'type': 'text-delta', + 'delta': ' sensitive ones or sanitize', + 'id': IsStr(), + }, + { + 'type': 'text-delta', + 'delta': """\ them. export OTEL\ """, - id='d775971d84c848228275a25a097b6409', - ), - TextDeltaChunk(delta='_INSTRUMENTATION_HTTP_CAPTURE', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta='_HEADERS_SERVER_REQUEST="content', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta='-type,user', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta='-agent"\n', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta='export OTEL_INSTRUMENTATION', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta='_HTTP_CAPTURE_HEADERS', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta='_SERVER_RESPONSE="content-type"\n', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta='export OTEL_INSTRUMENTATION_HTTP', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk( - delta="""\ + 'id': IsStr(), + }, + { + 'type': 'text-delta', + 'delta': '_INSTRUMENTATION_HTTP_CAPTURE', + 'id': IsStr(), + }, + { + 'type': 'text-delta', + 'delta': '_HEADERS_SERVER_REQUEST="content', + 'id': IsStr(), + }, + {'type': 'text-delta', 'delta': '-type,user', 'id': IsStr()}, + {'type': 'text-delta', 'delta': '-agent"\n', 'id': IsStr()}, + { + 'type': 'text-delta', + 'delta': 'export OTEL_INSTRUMENTATION', + 'id': IsStr(), + }, + {'type': 'text-delta', 'delta': '_HTTP_CAPTURE_HEADERS', 'id': IsStr()}, + { + 'type': 'text-delta', + 'delta': '_SERVER_RESPONSE="content-type"\n', + 'id': IsStr(), + }, + { + 'type': 'text-delta', + 'delta': 'export OTEL_INSTRUMENTATION_HTTP', + 'id': IsStr(), + }, + { + 'type': 'text-delta', + 'delta': """\ _CAPTURE_HEADERS_SANITIZE_FIELDS="authorization,set-cookie" This makes headers appear on spans as http.request.header.* and http.response.header.*. 
([opentelemetry-python-contrib.readthedocs.io](https://opentelemetry-python-contrib.readthedocs.io/en/latest/instrumentation/fastapi/fastapi.html)) 2)\ """, - id='d775971d84c848228275a25a097b6409', - ), - TextDeltaChunk(delta=' Add hooks to capture request', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta='/response bodies', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk( - delta="""\ + 'id': IsStr(), + }, + { + 'type': 'text-delta', + 'delta': ' Add hooks to capture request', + 'id': IsStr(), + }, + {'type': 'text-delta', 'delta': '/response bodies', 'id': IsStr()}, + { + 'type': 'text-delta', + 'delta': """\ Note:\ """, - id='d775971d84c848228275a25a097b6409', - ), - TextDeltaChunk(delta=IsStr(), id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta=' a built-in Python', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta=' env', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta=' var to', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta=' auto-capture', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta=' HTTP bodies for Fast', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta='API/AS', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta='GI. Use', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta=' hooks to look at', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta=' ASGI receive', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta='/send events and', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta=' attach (tr', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta='uncated) bodies', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta=' as span attributes', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk( - delta="""\ + 'id': IsStr(), + }, + {'type': 'text-delta', 'delta': IsStr(), 'id': IsStr()}, + {'type': 'text-delta', 'delta': ' a built-in Python', 'id': IsStr()}, + {'type': 'text-delta', 'delta': ' env', 'id': IsStr()}, + {'type': 'text-delta', 'delta': ' var to', 'id': IsStr()}, + {'type': 'text-delta', 'delta': ' auto-capture', 'id': IsStr()}, + {'type': 'text-delta', 'delta': ' HTTP bodies for Fast', 'id': IsStr()}, + {'type': 'text-delta', 'delta': 'API/AS', 'id': IsStr()}, + {'type': 'text-delta', 'delta': 'GI. Use', 'id': IsStr()}, + {'type': 'text-delta', 'delta': ' hooks to look at', 'id': IsStr()}, + {'type': 'text-delta', 'delta': ' ASGI receive', 'id': IsStr()}, + {'type': 'text-delta', 'delta': '/send events and', 'id': IsStr()}, + {'type': 'text-delta', 'delta': ' attach (tr', 'id': IsStr()}, + {'type': 'text-delta', 'delta': 'uncated) bodies', 'id': IsStr()}, + {'type': 'text-delta', 'delta': ' as span attributes', 'id': IsStr()}, + { + 'type': 'text-delta', + 'delta': """\ . 
from\ """, - id='d775971d84c848228275a25a097b6409', - ), - TextDeltaChunk(delta=' fastapi import', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta=' FastAPI', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk( - delta="""\ + 'id': IsStr(), + }, + {'type': 'text-delta', 'delta': ' fastapi import', 'id': IsStr()}, + {'type': 'text-delta', 'delta': ' FastAPI', 'id': IsStr()}, + { + 'type': 'text-delta', + 'delta': """\ from opente\ """, - id='d775971d84c848228275a25a097b6409', - ), - TextDeltaChunk(delta='lemetry.trace', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta=' import Span', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk( - delta="""\ + 'id': IsStr(), + }, + {'type': 'text-delta', 'delta': 'lemetry.trace', 'id': IsStr()}, + {'type': 'text-delta', 'delta': ' import Span', 'id': IsStr()}, + { + 'type': 'text-delta', + 'delta': """\ from opente\ """, - id='d775971d84c848228275a25a097b6409', - ), - TextDeltaChunk(delta='lemetry.instrument', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta='ation.fastapi import', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta=' FastAPIInstrument', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk( - delta="""\ + 'id': IsStr(), + }, + {'type': 'text-delta', 'delta': 'lemetry.instrument', 'id': IsStr()}, + {'type': 'text-delta', 'delta': 'ation.fastapi import', 'id': IsStr()}, + {'type': 'text-delta', 'delta': ' FastAPIInstrument', 'id': IsStr()}, + { + 'type': 'text-delta', + 'delta': """\ or MAX\ """, - id='d775971d84c848228275a25a097b6409', - ), - TextDeltaChunk(delta='_BYTES = ', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta='2048 ', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta=' # keep this', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta=' small in prod', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk( - delta="""\ + 'id': IsStr(), + }, + {'type': 'text-delta', 'delta': '_BYTES = ', 'id': IsStr()}, + {'type': 'text-delta', 'delta': '2048 ', 'id': IsStr()}, + {'type': 'text-delta', 'delta': ' # keep this', 'id': IsStr()}, + {'type': 'text-delta', 'delta': ' small in prod', 'id': IsStr()}, + { + 'type': 'text-delta', + 'delta': """\ def client\ """, - id='d775971d84c848228275a25a097b6409', - ), - TextDeltaChunk(delta='_request_hook(span', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta=': Span,', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta=' scope: dict', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta=', message:', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk( - delta="""\ + 'id': IsStr(), + }, + {'type': 'text-delta', 'delta': '_request_hook(span', 'id': IsStr()}, + {'type': 'text-delta', 'delta': ': Span,', 'id': IsStr()}, + {'type': 'text-delta', 'delta': ' scope: dict', 'id': IsStr()}, + {'type': 'text-delta', 'delta': ', message:', 'id': IsStr()}, + { + 'type': 'text-delta', + 'delta': """\ dict): \ """, - id='d775971d84c848228275a25a097b6409', - ), - TextDeltaChunk(delta=' if span and', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta=' span.is_record', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta='ing() and', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta=' message.get("', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta='type") ==', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta=' "http.request', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk( - delta="""\ 
+ 'id': IsStr(), + }, + {'type': 'text-delta', 'delta': ' if span and', 'id': IsStr()}, + {'type': 'text-delta', 'delta': ' span.is_record', 'id': IsStr()}, + {'type': 'text-delta', 'delta': 'ing() and', 'id': IsStr()}, + {'type': 'text-delta', 'delta': ' message.get("', 'id': IsStr()}, + {'type': 'text-delta', 'delta': 'type") ==', 'id': IsStr()}, + {'type': 'text-delta', 'delta': ' "http.request', 'id': IsStr()}, + { + 'type': 'text-delta', + 'delta': """\ ": body\ """, - id='d775971d84c848228275a25a097b6409', - ), - TextDeltaChunk(delta=' = message.get', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta='("body")', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta=' or b"', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk( - delta="""\ + 'id': IsStr(), + }, + {'type': 'text-delta', 'delta': ' = message.get', 'id': IsStr()}, + {'type': 'text-delta', 'delta': '("body")', 'id': IsStr()}, + {'type': 'text-delta', 'delta': ' or b"', 'id': IsStr()}, + { + 'type': 'text-delta', + 'delta': """\ " if\ """, - id='d775971d84c848228275a25a097b6409', - ), - TextDeltaChunk( - delta="""\ + 'id': IsStr(), + }, + { + 'type': 'text-delta', + 'delta': """\ body: \ """, - id='d775971d84c848228275a25a097b6409', - ), - TextDeltaChunk(delta=' span.set_attribute', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk( - delta="""\ + 'id': IsStr(), + }, + {'type': 'text-delta', 'delta': ' span.set_attribute', 'id': IsStr()}, + { + 'type': 'text-delta', + 'delta': """\ ( "\ """, - id='d775971d84c848228275a25a097b6409', - ), - TextDeltaChunk(delta='http.request.body', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk( - delta="""\ + 'id': IsStr(), + }, + {'type': 'text-delta', 'delta': 'http.request.body', 'id': IsStr()}, + { + 'type': 'text-delta', + 'delta': """\ ", body\ """, - id='d775971d84c848228275a25a097b6409', - ), - TextDeltaChunk(delta='[:MAX_BYTES', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta='].decode("', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta='utf-8', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta='", "replace', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk( - delta="""\ + 'id': IsStr(), + }, + {'type': 'text-delta', 'delta': '[:MAX_BYTES', 'id': IsStr()}, + {'type': 'text-delta', 'delta': '].decode("', 'id': IsStr()}, + {'type': 'text-delta', 'delta': 'utf-8', 'id': IsStr()}, + {'type': 'text-delta', 'delta': '", "replace', 'id': IsStr()}, + { + 'type': 'text-delta', + 'delta': """\ "), ) """, - id='d775971d84c848228275a25a097b6409', - ), - TextDeltaChunk( - delta="""\ + 'id': IsStr(), + }, + { + 'type': 'text-delta', + 'delta': """\ def client_response\ """, - id='d775971d84c848228275a25a097b6409', - ), - TextDeltaChunk(delta='_hook(span:', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta=' Span, scope', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta=': dict,', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta=' message: dict', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk( - delta="""\ + 'id': IsStr(), + }, + {'type': 'text-delta', 'delta': '_hook(span:', 'id': IsStr()}, + {'type': 'text-delta', 'delta': ' Span, scope', 'id': IsStr()}, + {'type': 'text-delta', 'delta': ': dict,', 'id': IsStr()}, + {'type': 'text-delta', 'delta': ' message: dict', 'id': IsStr()}, + { + 'type': 'text-delta', + 'delta': """\ ): if\ """, - id='d775971d84c848228275a25a097b6409', - ), - TextDeltaChunk(delta=' span and span', 
id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta='.is_recording', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta='() and message', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta='.get("type', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta='") == "', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta='http.response.body', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk( - delta="""\ + 'id': IsStr(), + }, + {'type': 'text-delta', 'delta': ' span and span', 'id': IsStr()}, + {'type': 'text-delta', 'delta': '.is_recording', 'id': IsStr()}, + {'type': 'text-delta', 'delta': '() and message', 'id': IsStr()}, + {'type': 'text-delta', 'delta': '.get("type', 'id': IsStr()}, + {'type': 'text-delta', 'delta': '") == "', 'id': IsStr()}, + {'type': 'text-delta', 'delta': 'http.response.body', 'id': IsStr()}, + { + 'type': 'text-delta', + 'delta': """\ ": body\ """, - id='d775971d84c848228275a25a097b6409', - ), - TextDeltaChunk(delta=' = message.get', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta='("body")', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta=' or b"', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk( - delta="""\ + 'id': IsStr(), + }, + {'type': 'text-delta', 'delta': ' = message.get', 'id': IsStr()}, + {'type': 'text-delta', 'delta': '("body")', 'id': IsStr()}, + {'type': 'text-delta', 'delta': ' or b"', 'id': IsStr()}, + { + 'type': 'text-delta', + 'delta': """\ " if\ """, - id='d775971d84c848228275a25a097b6409', - ), - TextDeltaChunk( - delta="""\ + 'id': IsStr(), + }, + { + 'type': 'text-delta', + 'delta': """\ body: \ """, - id='d775971d84c848228275a25a097b6409', - ), - TextDeltaChunk(delta=' span.set_attribute', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk( - delta="""\ + 'id': IsStr(), + }, + {'type': 'text-delta', 'delta': ' span.set_attribute', 'id': IsStr()}, + { + 'type': 'text-delta', + 'delta': """\ ( "\ """, - id='d775971d84c848228275a25a097b6409', - ), - TextDeltaChunk(delta='http.response.body', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk( - delta="""\ + 'id': IsStr(), + }, + {'type': 'text-delta', 'delta': 'http.response.body', 'id': IsStr()}, + { + 'type': 'text-delta', + 'delta': """\ ", body\ """, - id='d775971d84c848228275a25a097b6409', - ), - TextDeltaChunk(delta='[:MAX_BYTES', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta='].decode("', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta='utf-8', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta='", "replace', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk( - delta="""\ + 'id': IsStr(), + }, + {'type': 'text-delta', 'delta': '[:MAX_BYTES', 'id': IsStr()}, + {'type': 'text-delta', 'delta': '].decode("', 'id': IsStr()}, + {'type': 'text-delta', 'delta': 'utf-8', 'id': IsStr()}, + {'type': 'text-delta', 'delta': '", "replace', 'id': IsStr()}, + { + 'type': 'text-delta', + 'delta': """\ "), ) """, - id='d775971d84c848228275a25a097b6409', - ), - TextDeltaChunk( - delta="""\ + 'id': IsStr(), + }, + { + 'type': 'text-delta', + 'delta': """\ app = Fast\ """, - id='d775971d84c848228275a25a097b6409', - ), - TextDeltaChunk( - delta="""\ + 'id': IsStr(), + }, + { + 'type': 'text-delta', + 'delta': """\ API() Fast\ """, - id='d775971d84c848228275a25a097b6409', - ), - TextDeltaChunk(delta='APIInstrumentor', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta='.instrument_app(', id='d775971d84c848228275a25a097b6409'), - 
TextDeltaChunk( - delta="""\ + 'id': IsStr(), + }, + {'type': 'text-delta', 'delta': 'APIInstrumentor', 'id': IsStr()}, + {'type': 'text-delta', 'delta': '.instrument_app(', 'id': IsStr()}, + { + 'type': 'text-delta', + 'delta': """\ app,\ """, - id='d775971d84c848228275a25a097b6409', - ), - TextDeltaChunk( - delta="""\ + 'id': IsStr(), + }, + { + 'type': 'text-delta', + 'delta': """\ client_request\ """, - id='d775971d84c848228275a25a097b6409', - ), - TextDeltaChunk(delta='_hook=client', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk( - delta="""\ + 'id': IsStr(), + }, + {'type': 'text-delta', 'delta': '_hook=client', 'id': IsStr()}, + { + 'type': 'text-delta', + 'delta': """\ _request_hook, \ """, - id='d775971d84c848228275a25a097b6409', - ), - TextDeltaChunk(delta=' client_response_hook', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta='=client_response', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk( - delta="""\ + 'id': IsStr(), + }, + {'type': 'text-delta', 'delta': ' client_response_hook', 'id': IsStr()}, + {'type': 'text-delta', 'delta': '=client_response', 'id': IsStr()}, + { + 'type': 'text-delta', + 'delta': """\ _hook, ) """, - id='d775971d84c848228275a25a097b6409', - ), - TextDeltaChunk( - delta="""\ + 'id': IsStr(), + }, + { + 'type': 'text-delta', + 'delta': """\ - The hooks\ """, - id='d775971d84c848228275a25a097b6409', - ), - TextDeltaChunk(delta=' receive the AS', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta='GI event dict', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta='s: http', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta='.request (with', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta=' body/more', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta='_body) and', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta=' http.response.body', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta='. If your', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta=' bodies can be', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta=' chunked,', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta=' you may need', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta=' to accumulate across', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta=' calls when message', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta='.get("more', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta='_body") is', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta=' True. ', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk( - delta='([opentelemetry-python-contrib.readthedocs.io](https://opentelemetry-python-contrib.readthedocs.io/en/latest/instrumentation/fastapi/fastapi.html)', - id='d775971d84c848228275a25a097b6409', - ), - TextDeltaChunk(delta=')', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk( - delta="""\ + 'id': IsStr(), + }, + {'type': 'text-delta', 'delta': ' receive the AS', 'id': IsStr()}, + {'type': 'text-delta', 'delta': 'GI event dict', 'id': IsStr()}, + {'type': 'text-delta', 'delta': 's: http', 'id': IsStr()}, + {'type': 'text-delta', 'delta': '.request (with', 'id': IsStr()}, + {'type': 'text-delta', 'delta': ' body/more', 'id': IsStr()}, + {'type': 'text-delta', 'delta': '_body) and', 'id': IsStr()}, + {'type': 'text-delta', 'delta': ' http.response.body', 'id': IsStr()}, + {'type': 'text-delta', 'delta': '. 
If your', 'id': IsStr()}, + {'type': 'text-delta', 'delta': ' bodies can be', 'id': IsStr()}, + {'type': 'text-delta', 'delta': ' chunked,', 'id': IsStr()}, + {'type': 'text-delta', 'delta': ' you may need', 'id': IsStr()}, + {'type': 'text-delta', 'delta': ' to accumulate across', 'id': IsStr()}, + {'type': 'text-delta', 'delta': ' calls when message', 'id': IsStr()}, + {'type': 'text-delta', 'delta': '.get("more', 'id': IsStr()}, + {'type': 'text-delta', 'delta': '_body") is', 'id': IsStr()}, + {'type': 'text-delta', 'delta': ' True. ', 'id': IsStr()}, + { + 'type': 'text-delta', + 'delta': '([opentelemetry-python-contrib.readthedocs.io](https://opentelemetry-python-contrib.readthedocs.io/en/latest/instrumentation/fastapi/fastapi.html)', + 'id': IsStr(), + }, + {'type': 'text-delta', 'delta': ')', 'id': IsStr()}, + { + 'type': 'text-delta', + 'delta': """\ 3)\ """, - id='d775971d84c848228275a25a097b6409', - ), - TextDeltaChunk(delta=' Be careful with', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta=' PII and', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk( - delta="""\ + 'id': IsStr(), + }, + {'type': 'text-delta', 'delta': ' Be careful with', 'id': IsStr()}, + {'type': 'text-delta', 'delta': ' PII and', 'id': IsStr()}, + { + 'type': 'text-delta', + 'delta': """\ size -\ """, - id='d775971d84c848228275a25a097b6409', - ), - TextDeltaChunk(delta=' Always limit size', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta=' and consider redaction', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta=' before putting payloads', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk( - delta="""\ + 'id': IsStr(), + }, + {'type': 'text-delta', 'delta': ' Always limit size', 'id': IsStr()}, + {'type': 'text-delta', 'delta': ' and consider redaction', 'id': IsStr()}, + {'type': 'text-delta', 'delta': ' before putting payloads', 'id': IsStr()}, + { + 'type': 'text-delta', + 'delta': """\ on spans. -\ """, - id='d775971d84c848228275a25a097b6409', - ), - TextDeltaChunk(delta=' Use the sanitize', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta=' env var above', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta=' for sensitive headers', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta='. ', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk( - delta='([opentelemetry-python-contrib.readthedocs.io](https://opentelemetry-python-contrib.readthedocs.io/en/latest/instrumentation/fastapi/fastapi.html))\n', - id='d775971d84c848228275a25a097b6409', - ), - TextDeltaChunk( - delta="""\ + 'id': IsStr(), + }, + {'type': 'text-delta', 'delta': ' Use the sanitize', 'id': IsStr()}, + {'type': 'text-delta', 'delta': ' env var above', 'id': IsStr()}, + {'type': 'text-delta', 'delta': ' for sensitive headers', 'id': IsStr()}, + {'type': 'text-delta', 'delta': '. 
', 'id': IsStr()}, + { + 'type': 'text-delta', + 'delta': '([opentelemetry-python-contrib.readthedocs.io](https://opentelemetry-python-contrib.readthedocs.io/en/latest/instrumentation/fastapi/fastapi.html))\n', + 'id': IsStr(), + }, + { + 'type': 'text-delta', + 'delta': """\ Optional: correlate logs\ """, - id='d775971d84c848228275a25a097b6409', - ), - TextDeltaChunk( - delta="""\ + 'id': IsStr(), + }, + { + 'type': 'text-delta', + 'delta': """\ with traces -\ """, - id='d775971d84c848228275a25a097b6409', - ), - TextDeltaChunk(delta=' If you also want', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta=' request/response', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta=' details in logs with', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta=' trace IDs, enable', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta=' Python log correlation:\n', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk( - delta="""\ + 'id': IsStr(), + }, + {'type': 'text-delta', 'delta': ' If you also want', 'id': IsStr()}, + {'type': 'text-delta', 'delta': ' request/response', 'id': IsStr()}, + {'type': 'text-delta', 'delta': ' details in logs with', 'id': IsStr()}, + {'type': 'text-delta', 'delta': ' trace IDs, enable', 'id': IsStr()}, + {'type': 'text-delta', 'delta': ' Python log correlation:\n', 'id': IsStr()}, + { + 'type': 'text-delta', + 'delta': """\ export OTEL_P\ """, - id='d775971d84c848228275a25a097b6409', - ), - TextDeltaChunk(delta='YTHON_LOG_COR', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta='RELATION=true', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk( - delta="""\ + 'id': IsStr(), + }, + {'type': 'text-delta', 'delta': 'YTHON_LOG_COR', 'id': IsStr()}, + {'type': 'text-delta', 'delta': 'RELATION=true', 'id': IsStr()}, + { + 'type': 'text-delta', + 'delta': """\ or programmatically\ """, - id='d775971d84c848228275a25a097b6409', - ), - TextDeltaChunk( - delta="""\ + 'id': IsStr(), + }, + { + 'type': 'text-delta', + 'delta': """\ : from opente\ """, - id='d775971d84c848228275a25a097b6409', - ), - TextDeltaChunk(delta='lemetry.instrumentation', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta='.logging import LoggingInstrument', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk( - delta="""\ + 'id': IsStr(), + }, + {'type': 'text-delta', 'delta': 'lemetry.instrumentation', 'id': IsStr()}, + { + 'type': 'text-delta', + 'delta': '.logging import LoggingInstrument', + 'id': IsStr(), + }, + { + 'type': 'text-delta', + 'delta': """\ or LoggingInstrument\ """, - id='d775971d84c848228275a25a097b6409', - ), - TextDeltaChunk(delta='or().instrument(set', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta='_logging_format=True)\n', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk( - delta="""\ + 'id': IsStr(), + }, + {'type': 'text-delta', 'delta': 'or().instrument(set', 'id': IsStr()}, + {'type': 'text-delta', 'delta': '_logging_format=True)\n', 'id': IsStr()}, + { + 'type': 'text-delta', + 'delta': """\ This injects trace\ """, - id='d775971d84c848228275a25a097b6409', - ), - TextDeltaChunk(delta='_id/span_id into', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta=' log records so you', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta=' can line up logs', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta=' with the span that', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta=' carries the HTTP payload', 
id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta=' attributes. ', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk( - delta='([opentelemetry-python-contrib.readthedocs.io](https://opentelemetry-python-contrib.readthedocs.io/en/latest/instrumentation/logging/logging.html?utm_source=openai))\n', - id='d775971d84c848228275a25a097b6409', - ), - TextDeltaChunk( - delta="""\ + 'id': IsStr(), + }, + {'type': 'text-delta', 'delta': '_id/span_id into', 'id': IsStr()}, + {'type': 'text-delta', 'delta': ' log records so you', 'id': IsStr()}, + {'type': 'text-delta', 'delta': ' can line up logs', 'id': IsStr()}, + {'type': 'text-delta', 'delta': ' with the span that', 'id': IsStr()}, + {'type': 'text-delta', 'delta': ' carries the HTTP payload', 'id': IsStr()}, + {'type': 'text-delta', 'delta': ' attributes. ', 'id': IsStr()}, + { + 'type': 'text-delta', + 'delta': '([opentelemetry-python-contrib.readthedocs.io](https://opentelemetry-python-contrib.readthedocs.io/en/latest/instrumentation/logging/logging.html?utm_source=openai))\n', + 'id': IsStr(), + }, + { + 'type': 'text-delta', + 'delta': """\ Want me to tailor\ """, - id='d775971d84c848228275a25a097b6409', - ), - TextDeltaChunk(delta=' the hook to only', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta=' capture JSON bodies,', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta=' skip binary content,', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta=' or accumulate chunked', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta=' bodies safely?', id='d775971d84c848228275a25a097b6409'), - ToolInputStartChunk( - tool_call_id='ws_00e767404995b9950068e647f909248191bfe8d05eeed67645', tool_name='web_search' - ), - ToolInputDeltaChunk( - tool_call_id='ws_00e767404995b9950068e647f909248191bfe8d05eeed67645', - input_text_delta='{"query":"OpenTelemetry FastAPI instrumentation capture request and response body","type":"search"}', - ), - ToolOutputAvailableChunk( - tool_call_id='ws_00e767404995b9950068e647f909248191bfe8d05eeed67645', output={'status': 'completed'} - ), - ToolInputStartChunk( - tool_call_id='ws_00e767404995b9950068e647fb73c48191b0bdb147c3a0d22c', tool_name='web_search' - ), - ToolInputDeltaChunk( - tool_call_id='ws_00e767404995b9950068e647fb73c48191b0bdb147c3a0d22c', - input_text_delta='{"query":"OTEL_INSTRUMENTATION_HTTP_CAPTURE_BODY Python","type":"search"}', - ), - ToolOutputAvailableChunk( - tool_call_id='ws_00e767404995b9950068e647fb73c48191b0bdb147c3a0d22c', output={'status': 'completed'} - ), - ToolInputStartChunk( - tool_call_id='ws_00e767404995b9950068e647fee97c8191919865e0c0a78bba', tool_name='web_search' - ), - ToolInputDeltaChunk( - tool_call_id='ws_00e767404995b9950068e647fee97c8191919865e0c0a78bba', - input_text_delta='{"query":"OTEL_INSTRUMENTATION_HTTP_CAPTURE_BODY opentelemetry python","type":"search"}', - ), - ToolOutputAvailableChunk( - tool_call_id='ws_00e767404995b9950068e647fee97c8191919865e0c0a78bba', output={'status': 'completed'} - ), - ToolInputStartChunk( - tool_call_id='ws_00e767404995b9950068e64803f27c81918a39ce50cb8dfbc2', tool_name='web_search' - ), - ToolInputDeltaChunk( - tool_call_id='ws_00e767404995b9950068e64803f27c81918a39ce50cb8dfbc2', - input_text_delta='{"query":"site:github.com open-telemetry/opentelemetry-python-contrib OTEL_INSTRUMENTATION_HTTP_CAPTURE_BODY","type":"search"}', - ), - ToolOutputAvailableChunk( - tool_call_id='ws_00e767404995b9950068e64803f27c81918a39ce50cb8dfbc2', output={'status': 'completed'} - ), - 
ToolInputStartChunk( - tool_call_id='ws_00e767404995b9950068e6480ac0888191a7897231e6ca9911', tool_name='web_search' - ), - ToolInputDeltaChunk( - tool_call_id='ws_00e767404995b9950068e6480ac0888191a7897231e6ca9911', - input_text_delta='{"query":null,"type":"search"}', - ), - ToolOutputAvailableChunk( - tool_call_id='ws_00e767404995b9950068e6480ac0888191a7897231e6ca9911', output={'status': 'completed'} - ), - ToolInputStartChunk( - tool_call_id='ws_00e767404995b9950068e6480e11208191834104e1aaab1148', tool_name='web_search' - ), - ToolInputDeltaChunk( - tool_call_id='ws_00e767404995b9950068e6480e11208191834104e1aaab1148', - input_text_delta='{"query":null,"type":"search"}', - ), - ToolOutputAvailableChunk( - tool_call_id='ws_00e767404995b9950068e6480e11208191834104e1aaab1148', output={'status': 'completed'} - ), - ToolInputStartChunk( - tool_call_id='ws_00e767404995b9950068e648118bf88191aa7f804637c45b32', tool_name='web_search' - ), - ToolInputDeltaChunk( - tool_call_id='ws_00e767404995b9950068e648118bf88191aa7f804637c45b32', - input_text_delta='{"query":"OTEL_PYTHON_LOG_CORRELATION environment variable","type":"search"}', - ), - ToolOutputAvailableChunk( - tool_call_id='ws_00e767404995b9950068e648118bf88191aa7f804637c45b32', output={'status': 'completed'} - ), - FinishChunk(), - DoneChunk(), + 'id': IsStr(), + }, + {'type': 'text-delta', 'delta': ' the hook to only', 'id': IsStr()}, + {'type': 'text-delta', 'delta': ' capture JSON bodies,', 'id': IsStr()}, + {'type': 'text-delta', 'delta': ' skip binary content,', 'id': IsStr()}, + {'type': 'text-delta', 'delta': ' or accumulate chunked', 'id': IsStr()}, + {'type': 'text-delta', 'delta': ' bodies safely?', 'id': IsStr()}, + {'type': 'finish'}, ] ) From 2acc1c3865d0aca9339183fbf62043d099a2c5ae Mon Sep 17 00:00:00 2001 From: Douwe Maan Date: Fri, 10 Oct 2025 08:46:54 +0000 Subject: [PATCH 06/10] fix typecheck, tests, linter --- pydantic_ai_slim/pydantic_ai/_agent_graph.py | 6 +- pydantic_ai_slim/pydantic_ai/ag_ui.py | 14 +++- pydantic_ai_slim/pydantic_ai/ui/adapter.py | 81 +++++++------------ .../pydantic_ai/ui/ag_ui/_adapter.py | 24 +----- .../pydantic_ai/ui/ag_ui/_event_stream.py | 14 +--- .../pydantic_ai/ui/event_stream.py | 27 +------ .../pydantic_ai/ui/vercel_ai/_adapter.py | 44 +++++----- .../pydantic_ai/ui/vercel_ai/_event_stream.py | 13 +-- tests/test_ag_ui.py | 8 +- tests/test_agent.py | 8 +- 10 files changed, 82 insertions(+), 157 deletions(-) diff --git a/pydantic_ai_slim/pydantic_ai/_agent_graph.py b/pydantic_ai_slim/pydantic_ai/_agent_graph.py index fc883eee9b..bd6c863228 100644 --- a/pydantic_ai_slim/pydantic_ai/_agent_graph.py +++ b/pydantic_ai_slim/pydantic_ai/_agent_graph.py @@ -243,13 +243,13 @@ async def run( # noqa: C901 if self.user_prompt is not None: parts.append(_messages.UserPromptPart(self.user_prompt)) - if not parts: - raise exceptions.UserError('No messages provided') - next_message = _messages.ModelRequest(parts=parts) next_message.instructions = await ctx.deps.get_instructions(run_context) + if not messages and not next_message.parts and not next_message.instructions: + raise exceptions.UserError('No message history, user prompt, or instructions provided') + return ModelRequestNode[DepsT, NodeRunEndT](request=next_message) async def _handle_deferred_tool_results( # noqa: C901 diff --git a/pydantic_ai_slim/pydantic_ai/ag_ui.py b/pydantic_ai_slim/pydantic_ai/ag_ui.py index b4c32d8a12..df03646ae9 100644 --- a/pydantic_ai_slim/pydantic_ai/ag_ui.py +++ b/pydantic_ai_slim/pydantic_ai/ag_ui.py @@ -67,6 +67,8 @@ def 
__init__( *, # Agent.iter parameters. output_type: OutputSpec[Any] | None = None, + message_history: Sequence[ModelMessage] | None = None, + deferred_tool_results: DeferredToolResults | None = None, model: Model | KnownModelName | str | None = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, @@ -97,6 +99,8 @@ def __init__( output_type: Custom output type to use for this run, `output_type` may only be used if the agent has no output validators since output validators would expect an argument that matches the agent's output type. + message_history: History of the conversation so far. + deferred_tool_results: Optional results for deferred tool calls in the message history. model: Optional model to use for this run, required if `model` was not set when creating the agent. deps: Optional dependencies to use for this run. model_settings: Optional settings to use for this model's request. @@ -139,6 +143,8 @@ async def endpoint(request: Request) -> Response: agent, request, output_type=output_type, + message_history=message_history, + deferred_tool_results=deferred_tool_results, model=model, deps=deps, model_settings=model_settings, @@ -156,6 +162,8 @@ async def handle_ag_ui_request( request: Request, *, output_type: OutputSpec[Any] | None = None, + message_history: Sequence[ModelMessage] | None = None, + deferred_tool_results: DeferredToolResults | None = None, model: Model | KnownModelName | str | None = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, @@ -173,6 +181,8 @@ async def handle_ag_ui_request( output_type: Custom output type to use for this run, `output_type` may only be used if the agent has no output validators since output validators would expect an argument that matches the agent's output type. + message_history: History of the conversation so far. + deferred_tool_results: Optional results for deferred tool calls in the message history. model: Optional model to use for this run, required if `model` was not set when creating the agent. deps: Optional dependencies to use for this run. model_settings: Optional settings to use for this model's request. @@ -186,7 +196,7 @@ async def handle_ag_ui_request( Returns: A streaming Starlette response with AG-UI protocol events. """ - return await AGUIAdapter.dispatch_request( + return await AGUIAdapter[AgentDepsT].dispatch_request( agent, request, deps=deps, @@ -227,6 +237,8 @@ async def run_ag_ui( output_type: Custom output type to use for this run, `output_type` may only be used if the agent has no output validators since output validators would expect an argument that matches the agent's output type. + message_history: History of the conversation so far. + deferred_tool_results: Optional results for deferred tool calls in the message history. model: Optional model to use for this run, required if `model` was not set when creating the agent. deps: Optional dependencies to use for this run. model_settings: Optional settings to use for this model's request. diff --git a/pydantic_ai_slim/pydantic_ai/ui/adapter.py b/pydantic_ai_slim/pydantic_ai/ui/adapter.py index 00b8afa311..0fe6d1f006 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/adapter.py +++ b/pydantic_ai_slim/pydantic_ai/ui/adapter.py @@ -116,30 +116,7 @@ class StateDeps(Generic[StateT]): @dataclass class BaseAdapter(ABC, Generic[RequestT, MessageT, EventT, AgentDepsT]): - """Base adapter for handling UI protocol requests and streaming responses. 
-
-
-    This class provides a unified interface for request/response handling across different
-    UI protocols (AG-UI, Vercel AI, etc.). It handles:
-    - Request parsing and validation
-    - Message format conversion (protocol messages → pAI messages)
-    - Agent execution and event streaming
-    - Error handling (validation errors vs streaming errors)
-    - SSE encoding
-
-    Type Parameters:
-        RequestT: Protocol-specific request type (e.g., RunAgentInput, RequestData)
-        MessageT: Protocol-specific message type (e.g., ag_ui.Message, UIMessage)
-        EventT: Protocol-specific event type (e.g., ag_ui.BaseEvent, BaseChunk)
-        AgentDepsT: Agent dependencies type
-
-    Example:
-    ```python
-    class MyAdapter(BaseAdapter[MyRequest, MyMessage, MyEvent, MyDeps]):
-        def messages(self, messages: list[MyMessage]) -> list[ModelMessage]:
-            # Convert protocol messages to pAI messages
-            ...
-    ```
-    """
+    """TODO (DouweM): Docstring."""
 
     agent: AbstractAgent[AgentDepsT]
     """The Pydantic AI agent to run."""
 
@@ -169,23 +146,9 @@ def toolset(self) -> AbstractToolset[AgentDepsT] | None:
         return None
 
     @cached_property
-    def raw_state(self) -> dict[str, Any]:
+    def raw_state(self) -> dict[str, Any] | None:
         """Get the state of the agent run."""
-        return {}
-
-    def deps_with_state(self, deps: AgentDepsT) -> AgentDepsT:
-        raw_state: dict[str, Any] = self.raw_state
-        if isinstance(deps, StateHandler):
-            if isinstance(deps.state, BaseModel):
-                state = type(deps.state).model_validate(raw_state)
-            else:
-                state = raw_state
-
-            return replace(deps, state=state)
-        elif raw_state:
-            raise UserError(
-                f'AG-UI state is provided but `deps` of type `{type(deps).__name__}` does not implement the `StateHandler` protocol: it needs to be a dataclass with a non-optional `state` field.'
-            )
+        return None
 
     @abstractmethod
     def encode_event(self, event: EventT, accept: str | None = None) -> str:
@@ -258,12 +221,10 @@ async def run_stream(
         """Run the agent with the AG-UI run input and stream AG-UI protocol events.
 
         Args:
-            agent: The agent to run.
-            run_input: The AG-UI run input containing thread_id, run_id, messages, etc.
-            accept: The accept header value for the run.
-
             output_type: Custom output type to use for this run, `output_type` may only be used if the agent has no
                 output validators since output validators would expect an argument that matches the agent's output type.
+            message_history: History of the conversation so far.
+            deferred_tool_results: Optional results for deferred tool calls in the message history.
             model: Optional model to use for this run, required if `model` was not set when creating the agent.
             deps: Optional dependencies to use for this run.
             model_settings: Optional settings to use for this model's request.
@@ -284,7 +245,18 @@
         output_type = [output_type or self.agent.output_type, DeferredToolRequests]
         toolsets = [*toolsets, toolset] if toolsets else [toolset]
 
-        deps = self.deps_with_state(deps)
+        if isinstance(deps, StateHandler):
+            raw_state = self.raw_state or {}
+            if isinstance(deps.state, BaseModel):
+                state = type(deps.state).model_validate(raw_state)
+            else:
+                state = raw_state
+
+            deps = replace(deps, state=state)
+        elif self.raw_state:
+            raise UserError(
+                f'State is provided but `deps` of type `{type(deps).__name__}` does not implement the `StateHandler` protocol: it needs to be a dataclass with a non-optional `state` field.'
+            )
 
         async for event in self.process_stream(
            self.agent.run_stream_events(
@@ -325,16 +297,21 @@ async def dispatch_request(
        """Handle an AG-UI request and return a streaming response. 
 
         Args:
+            agent: The agent to run.
             request: The incoming Starlette/FastAPI request.
-            deps: Optional dependencies to pass to the agent.
-            output_type: Custom output type for this run.
-            model: Optional model to use for this run.
-            model_settings: Optional settings for the model's request.
+            output_type: Custom output type to use for this run, `output_type` may only be used if the agent has no
+                output validators since output validators would expect an argument that matches the agent's output type.
+            message_history: History of the conversation so far.
+            deferred_tool_results: Optional results for deferred tool calls in the message history.
+            model: Optional model to use for this run, required if `model` was not set when creating the agent.
+            deps: Optional dependencies to use for this run.
+            model_settings: Optional settings to use for this model's request.
             usage_limits: Optional limits on model request count or token usage.
-            usage: Optional usage to start with.
-            infer_name: Whether to infer the agent name from the call frame.
+            usage: Optional usage to start with, useful for resuming a conversation or agents used in tools.
+            infer_name: Whether to try to infer the agent name from the call frame if it's not set.
             toolsets: Optional additional toolsets for this run.
-            on_complete: Optional callback called when the agent run completes.
+            on_complete: Optional callback function called when the agent run completes successfully.
+                The callback receives the completed [`AgentRunResult`][pydantic_ai.agent.AgentRunResult] and can access `all_messages()` and other result data.
 
         Returns:
             A streaming Starlette response with AG-UI protocol events.
diff --git a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py
index 0681caa2c4..e3694dd2b7 100644
--- a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py
+++ b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py
@@ -97,25 +97,7 @@ def label(self) -> str:
 
 
 class AGUIAdapter(BaseAdapter[RunAgentInput, Message, BaseEvent, AgentDepsT]):
-    """Adapter for handling AG-UI protocol requests with Pydantic AI agents.
-
-    This adapter provides an interface for integrating Pydantic AI agents
-    with the AG-UI protocol, handling request parsing, message conversion,
-    and event streaming.
-
-    Example:
-    ```python
-    from pydantic_ai import Agent
-    from pydantic_ai.ui.ag_ui import AGUIAdapter
-
-    agent = Agent('openai:gpt-4')
-    adapter = AGUIAdapter(agent)
-
-    async def handle_request(request: RunAgentInput, deps=None):
-        async for event_str in adapter.run_stream_encoded(request, deps):
-            yield event_str
-    ```
-    """
+    """TODO (DouweM): Docstring."""
 
     def create_event_stream(self) -> BaseEventStream[RunAgentInput, BaseEvent, AgentDepsT]:
         """Create an event stream for the adapter."""
@@ -134,9 +116,9 @@ def toolset(self) -> AbstractToolset[AgentDepsT] | None:
         return None
 
     @cached_property
-    def raw_state(self) -> dict[str, Any]:
+    def raw_state(self) -> dict[str, Any] | None:
         """Get the state of the agent run."""
-        return self.request.state or {}
+        return self.request.state
 
     def encode_event(self, event: BaseEvent, accept: str | None = None) -> str:
         """Encode an AG-UI event as SSE. 
diff --git a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_event_stream.py b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_event_stream.py
index e472bc204f..59ffec19a0 100644
--- a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_event_stream.py
+++ b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_event_stream.py
@@ -68,19 +68,7 @@
 
 
 class AGUIEventStream(BaseEventStream[RunAgentInput, BaseEvent, AgentDepsT]):
-    """Transforms Pydantic AI agent events into AG-UI protocol events.
-
-    This class handles the stateful transformation of streaming agent events
-    into the AG-UI protocol format, managing message IDs, thinking mode state,
-    and tool call ID mappings for builtin tools.
-
-    Example:
-    ```python
-    event_stream = AGUIEventStream()
-    async for ag_ui_event in event_stream.handle_event(pai_event):
-        print(ag_ui_event)
-    ```
-    """
+    """TODO (DouweM): Docstring."""
 
     def __init__(self, request: RunAgentInput) -> None:
         """Initialize AG-UI event stream state."""
diff --git a/pydantic_ai_slim/pydantic_ai/ui/event_stream.py b/pydantic_ai_slim/pydantic_ai/ui/event_stream.py
index 927e910409..7f94135143 100644
--- a/pydantic_ai_slim/pydantic_ai/ui/event_stream.py
+++ b/pydantic_ai_slim/pydantic_ai/ui/event_stream.py
@@ -47,28 +47,7 @@
 
 @dataclass
 class BaseEventStream(ABC, Generic[RequestT, EventT, AgentDepsT]):
-    """Base class for transforming pAI agent events into protocol-specific events.
-
-    This class provides a granular method-per-part-type pattern that makes it easy to customize
-    specific event transformations by overriding individual handler methods.
-
-    Subclasses should:
-    1. Initialize state attributes in `__init__` if needed
-    2. Implement all abstract `handle_*` methods for event transformation
-    3. Implement error handling methods
-    4. Optionally override lifecycle hooks (`before_stream`, `after_stream`)
-
-    Example:
-    ```python
-    class MyEventStream(BaseEventStream[MyEvent, MyDeps]):
-        def __init__(self):
-            super().__init__()
-            self.custom_state = {}
-
-        async def handle_text_start(self, part: TextPart):
-            yield MyTextStartEvent(id=self.new_message_id(), text=part.content)
-    ```
-    """
+    """TODO (DouweM): Docstring."""
 
     request: RequestT
 
@@ -109,13 +88,13 @@ async def handle_stream(self, stream: AsyncIterator[SourceEvent]) -> AsyncIterat
         yield e
 
     async def handle_event(self, event: SourceEvent) -> AsyncIterator[EventT]:  # noqa: C901
-        """Transform a pAI agent event into protocol-specific events.
+        """Transform a Pydantic AI agent event into protocol-specific events.
 
         This method dispatches to specific `handle_*` methods based on event and part type.
         Subclasses should implement the individual handler methods rather than overriding this.
 
         Args:
-            event: The pAI agent event to transform.
+            event: The Pydantic AI agent event to transform.
 
         Yields:
             Protocol-specific events.
diff --git a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_adapter.py b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_adapter.py
index 8f3053ff89..77d66ba4b6 100644
--- a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_adapter.py
+++ b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_adapter.py
@@ -46,25 +46,7 @@
 
 @dataclass
 class VercelAIAdapter(BaseAdapter[RequestData, UIMessage, BaseChunk, AgentDepsT]):
-    """Adapter for handling Vercel AI protocol requests with Pydantic AI agents.
-
-    This adapter provides a simplified interface for integrating Pydantic AI agents
-    with the Vercel AI protocol, handling request parsing, message conversion,
-    and event streaming. 
-
-    Example:
-    ```python
-    from pydantic_ai import Agent
-    from pydantic_ai.ui.vercel_ai import VercelAIAdapter
-
-    agent = Agent('openai:gpt-4')
-    adapter = VercelAIAdapter(agent)
-
-    async def handle_request(data: RequestData, deps=None):
-        async for chunk in adapter.run_stream(data, deps):
-            yield chunk.sse()
-    ```
-    """
+    """TODO (DouweM): Docstring."""
 
     def create_event_stream(self) -> BaseEventStream[RequestData, BaseChunk, AgentDepsT]:
         return VercelAIEventStream(self.request)
@@ -98,16 +80,22 @@ async def dispatch_request(
         """Handle an AG-UI request and return a streaming response.
 
         Args:
+            agent: The agent to run.
             request: The incoming Starlette/FastAPI request.
+
-            deps: Optional dependencies to pass to the agent.
-            output_type: Custom output type for this run.
-            model: Optional model to use for this run.
-            model_settings: Optional settings for the model's request.
+            output_type: Custom output type to use for this run, `output_type` may only be used if the agent has no
+                output validators since output validators would expect an argument that matches the agent's output type.
+            message_history: History of the conversation so far.
+            deferred_tool_results: Optional results for deferred tool calls in the message history.
+            model: Optional model to use for this run, required if `model` was not set when creating the agent.
+            deps: Optional dependencies to use for this run.
+            model_settings: Optional settings to use for this model's request.
             usage_limits: Optional limits on model request count or token usage.
-            usage: Optional usage to start with.
-            infer_name: Whether to infer the agent name from the call frame.
+            usage: Optional usage to start with, useful for resuming a conversation or agents used in tools.
+            infer_name: Whether to try to infer the agent name from the call frame if it's not set.
             toolsets: Optional additional toolsets for this run.
-            on_complete: Optional callback called when the agent run completes.
+            on_complete: Optional callback function called when the agent run completes successfully.
+                The callback receives the completed [`AgentRunResult`][pydantic_ai.agent.AgentRunResult] and can access `all_messages()` and other result data.
 
         Returns:
             A streaming Starlette response with AG-UI protocol events.
@@ -181,3 +169,7 @@ def messages(self) -> list[ModelMessage]:
             pai_messages.append(ModelRequest(parts=[SystemPromptPart(content='\n'.join(texts))]))
 
         return pai_messages
+
+    # TODO (DouweM): model, builtin_tools?
+
+    # TODO (DouweM): static load_messages, dump_messages
diff --git a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_event_stream.py b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_event_stream.py
index 26cab17894..9fc938a4d7 100644
--- a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_event_stream.py
+++ b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_event_stream.py
@@ -51,18 +51,7 @@ def _json_dumps(obj: Any) -> str:
 
 
 class VercelAIEventStream(BaseEventStream[RequestData, BaseChunk, AgentDepsT]):
-    """Transforms Pydantic AI agent events into Vercel AI protocol events.
-
-    This class handles the stateful transformation of streaming agent events
-    into the Vercel AI protocol format, managing message IDs and final result tool tracking. 
-
-    Example:
-    ```python
-    event_stream = VercelAIEventStream()
-    async for vercel_event in event_stream.handle_event(pai_event):
-        print(vercel_event.sse())
-    ```
-    """
+    """TODO (DouweM): Docstring."""
 
     def __init__(self, request: RequestData) -> None:
         """Initialize Vercel AI event stream state."""
diff --git a/tests/test_ag_ui.py b/tests/test_ag_ui.py
index e7e0be7746..ea8962abda 100644
--- a/tests/test_ag_ui.py
+++ b/tests/test_ag_ui.py
@@ -255,7 +255,11 @@ async def stream_function(
                 'threadId': IsStr(),
                 'runId': IsStr(),
             },
-            {'type': 'RUN_ERROR', 'message': 'No messages provided', 'code': 'UserError'},
+            {
+                'type': 'RUN_ERROR',
+                'message': 'No message history, user prompt, or instructions provided',
+                'code': 'UserError',
+            },
         ]
     )
 
@@ -1152,7 +1156,7 @@ async def test_request_with_state_without_handler() -> None:
 
     with pytest.raises(
         UserError,
-        match='AG-UI state is provided but `deps` of type `NoneType` does not implement the `StateHandler` protocol: it needs to be a dataclass with a non-optional `state` field.',
+        match='State is provided but `deps` of type `NoneType` does not implement the `StateHandler` protocol: it needs to be a dataclass with a non-optional `state` field.',
    ):
         async for _ in run_ag_ui(agent, run_input):
             pass
diff --git a/tests/test_agent.py b/tests/test_agent.py
index b27e815651..fc6b97ace7 100644
--- a/tests/test_agent.py
+++ b/tests/test_agent.py
@@ -4562,7 +4562,7 @@ async def call_tools_parallel(messages: list[ModelMessage], info: AgentInfo) ->
     server = MCPServerStdio('python', ['-m', 'tests.mcp_server'])
     agent = Agent(FunctionModel(call_tools_parallel), toolsets=[server])
 
-    result = agent.run_sync()
+    result = agent.run_sync('call tools in parallel')
     assert result.output == snapshot('finished')
 
 
@@ -4620,11 +4620,13 @@ def call_second():
         FunctionModel(call_tools_sequential), toolsets=[sequential_toolset], output_type=[str, DeferredToolRequests]
     )
 
+    user_prompt = 'call a lot of tools'
+
     if mode == 'contextmanager':
         with agent.sequential_tool_calls():
-            result = agent.run_sync()
+            result = agent.run_sync(user_prompt)
     else:
-        result = agent.run_sync()
+        result = agent.run_sync(user_prompt)
 
     assert result.output == snapshot(
         DeferredToolRequests(approvals=[ToolCallPart(tool_name='requires_approval', tool_call_id=IsStr())])

From 2d7c781b7c3fe114a1656950987ed69d4a88b751 Mon Sep 17 00:00:00 2001
From: Douwe Maan 
Date: Fri, 10 Oct 2025 09:30:22 +0000
Subject: [PATCH 07/10] Fix Vercel

---
 .../pydantic_ai/ui/vercel_ai/_adapter.py       |  2 +-
 .../pydantic_ai/ui/vercel_ai/_request_types.py | 17 +++++++++++++----
 tests/test_vercel_ai.py                        | 10 +++++++---
 3 files changed, 21 insertions(+), 8 deletions(-)

diff --git a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_adapter.py b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_adapter.py
index 77d66ba4b6..7cb064e8cb 100644
--- a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_adapter.py
+++ b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_adapter.py
@@ -57,7 +57,7 @@ def encode_event(self, event: BaseChunk, accept: str | None = None) -> str:
     @classmethod
     async def validate_request(cls, request: Request) -> RequestData:
         """Validate a Vercel AI request."""
-        return request_data_ta.validate_json(await request.json())
+        return request_data_ta.validate_json(await request.body())
 
     @classmethod
     async def dispatch_request(
diff --git a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_request_types.py b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_request_types.py
index ea30436378..c2aa43f506 100644
--- a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_request_types.py
+++ 
b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_request_types.py @@ -4,9 +4,9 @@ https://github.com/vercel/ai/blob/ai%405.0.34/packages/ai/src/ui/ui-messages.ts """ -from typing import Any, Literal +from typing import Annotated, Any, Literal -from pydantic import TypeAdapter +from pydantic import Discriminator, TypeAdapter from ._utils import CamelBaseModel, ProviderMetadata @@ -56,7 +56,7 @@ class UIMessage(CamelBaseModel): parts: list[UIPart] -class SubmitMessage(CamelBaseModel): +class SubmitMessage(CamelBaseModel, extra='allow'): """Submit message request.""" trigger: Literal['submit-message'] = 'submit-message' @@ -64,7 +64,16 @@ class SubmitMessage(CamelBaseModel): messages: list[UIMessage] -RequestData = SubmitMessage +class RegenerateMessage(CamelBaseModel, extra='allow'): + """Ask the agent to regenerate a message.""" + + trigger: Literal['regenerate-message'] + id: str + messages: list[UIMessage] + message_id: str + + +RequestData = Annotated[SubmitMessage | RegenerateMessage, Discriminator('trigger')] # Type adapter for parsing requests request_data_ta: TypeAdapter[RequestData] = TypeAdapter(RequestData) diff --git a/tests/test_vercel_ai.py b/tests/test_vercel_ai.py index 93109970a6..0f61684697 100644 --- a/tests/test_vercel_ai.py +++ b/tests/test_vercel_ai.py @@ -7,8 +7,6 @@ from pydantic_ai import Agent from pydantic_ai.builtin_tools import WebSearchTool -from pydantic_ai.models.openai import OpenAIResponsesModel -from pydantic_ai.providers.openai import OpenAIProvider from pydantic_ai.ui.vercel_ai import VercelAIAdapter from pydantic_ai.ui.vercel_ai._request_types import ( SubmitMessage, @@ -17,7 +15,12 @@ UIMessage, ) -from .conftest import IsStr +from .conftest import IsStr, try_import + +with try_import() as openai_import_successful: + from pydantic_ai.models.openai import OpenAIResponsesModel + from pydantic_ai.providers.openai import OpenAIProvider + pytestmark = [ pytest.mark.anyio, @@ -31,6 +34,7 @@ ] +@pytest.mark.skipif(not openai_import_successful(), reason='OpenAI not installed') async def test_run(allow_model_requests: None, openai_api_key: str): model = OpenAIResponsesModel('gpt-5', provider=OpenAIProvider(api_key=openai_api_key)) agent = Agent(model=model, builtin_tools=[WebSearchTool()]) From 03862a5750d6c89cc3c585acfccd5782c82061d9 Mon Sep 17 00:00:00 2001 From: Douwe Maan Date: Fri, 10 Oct 2025 10:03:24 +0000 Subject: [PATCH 08/10] cleanup --- pydantic_ai_slim/pydantic_ai/ui/adapter.py | 58 +++++----- .../pydantic_ai/ui/ag_ui/__init__.py | 4 +- .../pydantic_ai/ui/ag_ui/_adapter.py | 40 +++---- .../pydantic_ai/ui/ag_ui/_event_stream.py | 19 ++++ .../pydantic_ai/ui/event_stream.py | 28 ++++- .../pydantic_ai/ui/vercel_ai/__init__.py | 5 +- .../pydantic_ai/ui/vercel_ai/_adapter.py | 106 +++++------------- .../pydantic_ai/ui/vercel_ai/_event_stream.py | 3 + .../pydantic_ai/ui/vercel_ai/_utils.py | 5 +- 9 files changed, 125 insertions(+), 143 deletions(-) diff --git a/pydantic_ai_slim/pydantic_ai/ui/adapter.py b/pydantic_ai_slim/pydantic_ai/ui/adapter.py index 0fe6d1f006..a75d3d03da 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/adapter.py +++ b/pydantic_ai_slim/pydantic_ai/ui/adapter.py @@ -7,7 +7,7 @@ from __future__ import annotations from abc import ABC, abstractmethod -from collections.abc import AsyncIterator, Awaitable, Callable, Sequence +from collections.abc import AsyncIterator, Awaitable, Callable, Mapping, Sequence from dataclasses import Field, dataclass, replace from functools import cached_property from http import HTTPStatus @@ -53,7 +53,7 @@ """Type 
variable for protocol-specific event types."""
 
 
-RequestT = TypeVar('RequestT')
+RunRequestT = TypeVar('RunRequestT')
 """Type variable for protocol-specific request types."""
 
 MessageT = TypeVar('MessageT')
@@ -115,17 +115,32 @@ class StateDeps(Generic[StateT]):
 
 
 @dataclass
-class BaseAdapter(ABC, Generic[RequestT, MessageT, EventT, AgentDepsT]):
+class BaseAdapter(ABC, Generic[RunRequestT, MessageT, EventT, AgentDepsT]):
     """TODO (DouweM): Docstring."""
 
     agent: AbstractAgent[AgentDepsT]
     """The Pydantic AI agent to run."""
 
-    request: RequestT
+    request: RunRequestT  # TODO (DouweM): rename
     """The protocol-specific request object."""
 
+    @classmethod
+    async def validate_request(cls, request: Request) -> RunRequestT:
+        """Validate the request and return the validated request."""
+        raise NotImplementedError('validate_request is not implemented')
+
+    @classmethod
+    @abstractmethod
+    def load_messages(cls, messages: Sequence[MessageT]) -> list[ModelMessage]:
+        """Load messages from the request and return the loaded messages."""
+
     @abstractmethod
-    def create_event_stream(self) -> BaseEventStream[RequestT, EventT, AgentDepsT]:
+    def dump_messages(self, messages: Sequence[ModelMessage]) -> list[MessageT]:
+        """Dump messages to the request and return the dumped messages."""
+
+    @cached_property
+    @abstractmethod
+    def event_stream(self) -> BaseEventStream[RunRequestT, EventT, AgentDepsT]:
         """Create an event stream for the adapter."""
 
     @cached_property
@@ -150,32 +165,24 @@ def raw_state(self) -> dict[str, Any] | None:
         """Get the state of the agent run."""
         return None
 
-    @abstractmethod
-    def encode_event(self, event: EventT, accept: str | None = None) -> str:
-        """Encode a protocol event as an SSE string.
-
-        Args:
-            event: The protocol-specific event.
-            accept: The accept header value for encoding format.
+    @property
+    def result(self) -> AgentRunResult | None:
+        """Get the result of the agent run."""
+        return self.event_stream.result
 
-        Returns:
-            Formatted string.
-        """
+    @property
+    def response_headers(self) -> Mapping[str, str] | None:
+        """Get the response headers for the adapter."""
+        return None
 
-    async def encode_stream(self, stream: AsyncIterator[EventT], accept: str | None = None) -> AsyncIterator[str]:
+    def encode_stream(self, stream: AsyncIterator[EventT], accept: str | None = None) -> AsyncIterator[str]:
         """Encode a stream of events as SSE strings.
 
         Args:
             stream: The stream of events to encode.
             accept: The accept header value for encoding format.
         """
-        async for event in stream:
-            yield self.encode_event(event, accept)
-
-    @classmethod
-    async def validate_request(cls, request: Request) -> RequestT:
-        """Validate the request and return the validated request."""
-        raise NotImplementedError('validate_request is not implemented')
+        return self.event_stream.encode_stream(stream, accept)
 
     async def process_stream(
         self,
@@ -188,7 +195,7 @@ async def process_stream(
             stream: The stream of events to process.
             on_complete: Optional callback function called when the agent run completes successfully. 
""" - event_stream = self.create_event_stream() + event_stream = self.event_stream async for event in event_stream.handle_stream(stream): yield event @@ -343,5 +350,6 @@ async def dispatch_request( on_complete=on_complete, ), accept=request.headers.get('accept'), - ) + ), + headers=adapter.response_headers, ) diff --git a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/__init__.py b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/__init__.py index 53784efb51..eccd37ed89 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/__init__.py +++ b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/__init__.py @@ -1,7 +1,7 @@ """AG-UI protocol integration for Pydantic AI agents.""" -from ._adapter import SSE_CONTENT_TYPE, AGUIAdapter -from ._event_stream import AGUIEventStream +from ._adapter import AGUIAdapter +from ._event_stream import SSE_CONTENT_TYPE, AGUIEventStream __all__ = [ 'AGUIAdapter', diff --git a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py index e3694dd2b7..b4d6c98b98 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py +++ b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py @@ -2,11 +2,11 @@ from __future__ import annotations +from collections.abc import Sequence from functools import cached_property from typing import ( TYPE_CHECKING, Any, - Final, ) from ... import ExternalToolset, ToolDefinition @@ -39,7 +39,6 @@ ToolMessage, UserMessage, ) - from ag_ui.encoder import EventEncoder from ..adapter import BaseAdapter from ..event_stream import BaseEventStream @@ -64,9 +63,6 @@ __all__ = ['AGUIAdapter'] -SSE_CONTENT_TYPE: Final[str] = 'text/event-stream' -"""Content type header value for Server-Sent Events (SSE).""" - # Frontend toolset @@ -99,15 +95,21 @@ def label(self) -> str: class AGUIAdapter(BaseAdapter[RunAgentInput, Message, BaseEvent, AgentDepsT]): """TODO (DouwM): Docstring.""" - def create_event_stream(self) -> BaseEventStream[RunAgentInput, BaseEvent, AgentDepsT]: - """Create an event stream for the adapter.""" - return AGUIEventStream(self.request) - @classmethod async def validate_request(cls, request: Request) -> RunAgentInput: """Validate the request and return the validated request.""" return RunAgentInput.model_validate(await request.json()) + def dump_messages(self, messages: Sequence[ModelMessage]) -> list[Message]: + """Dump messages to the request and return the dumped messages.""" + # TODO (DouweM): bring in from https://github.com/pydantic/pydantic-ai/pull/3068 + raise NotImplementedError + + @cached_property + def event_stream(self) -> BaseEventStream[RunAgentInput, BaseEvent, AgentDepsT]: + """Create an event stream for the adapter.""" + return AGUIEventStream(self.request) + @cached_property def toolset(self) -> AbstractToolset[AgentDepsT] | None: """Get the toolset for the adapter.""" @@ -120,19 +122,6 @@ def raw_state(self) -> dict[str, Any] | None: """Get the state of the agent run.""" return self.request.state - def encode_event(self, event: BaseEvent, accept: str | None = None) -> str: - """Encode an AG-UI event as SSE. - - Args: - event: The AG-UI event to encode. - accept: The accept header value for encoding format. - - Returns: - The SSE-formatted string. - """ - encoder = EventEncoder(accept=accept or SSE_CONTENT_TYPE) - return encoder.encode(event) - @cached_property def messages(self) -> list[ModelMessage]: """Convert AG-UI messages to Pydantic AI messages. @@ -143,12 +132,17 @@ def messages(self) -> list[ModelMessage]: Returns: List of Pydantic AI ModelMessage objects. 
""" + return self.load_messages(self.request.messages) + + @classmethod + def load_messages(cls, messages: Sequence[Message]) -> list[ModelMessage]: + """Load messages from the request and return the loaded messages.""" result: list[ModelMessage] = [] tool_calls: dict[str, str] = {} # Tool call ID to tool name mapping. request_parts: list[ModelRequestPart] | None = None response_parts: list[ModelResponsePart] | None = None - for msg in self.request.messages: + for msg in messages: if isinstance(msg, UserMessage | SystemMessage | DeveloperMessage) or ( isinstance(msg, ToolMessage) and not msg.tool_call_id.startswith(BUILTIN_TOOL_CALL_ID_PREFIX) ): diff --git a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_event_stream.py b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_event_stream.py index 59ffec19a0..097f2f253f 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_event_stream.py +++ b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_event_stream.py @@ -51,6 +51,8 @@ ToolCallResultEvent, ToolCallStartEvent, ) + from ag_ui.encoder import EventEncoder + except ImportError as e: # pragma: no cover raise ImportError( 'Please install the `ag-ui-protocol` package to use AG-UI integration, ' @@ -64,6 +66,10 @@ 'RunFinishedEvent', ] +SSE_CONTENT_TYPE: Final[str] = 'text/event-stream' +"""Content type header value for Server-Sent Events (SSE).""" + + BUILTIN_TOOL_CALL_ID_PREFIX: Final[str] = 'pyd_ai_builtin' @@ -77,6 +83,19 @@ def __init__(self, request: RunAgentInput) -> None: self.thinking: bool = False self.builtin_tool_call_ids: dict[str, str] = {} + def encode_event(self, event: BaseEvent, accept: str | None = None) -> str: + """Encode an AG-UI event as SSE. + + Args: + event: The AG-UI event to encode. + accept: The accept header value for encoding format. + + Returns: + The SSE-formatted string. + """ + encoder = EventEncoder(accept=accept or SSE_CONTENT_TYPE) + return encoder.encode(event) + async def before_stream(self) -> AsyncIterator[BaseEvent]: """Yield events before agent streaming starts.""" yield RunStartedEvent( diff --git a/pydantic_ai_slim/pydantic_ai/ui/event_stream.py b/pydantic_ai_slim/pydantic_ai/ui/event_stream.py index 7f94135143..68d77342dc 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/event_stream.py +++ b/pydantic_ai_slim/pydantic_ai/ui/event_stream.py @@ -6,7 +6,7 @@ from __future__ import annotations -from abc import ABC +from abc import ABC, abstractmethod from collections.abc import AsyncIterator from dataclasses import dataclass, field from typing import Generic, TypeVar @@ -39,17 +39,17 @@ EventT = TypeVar('EventT') """Type variable for protocol-specific event types.""" -RequestT = TypeVar('RequestT') +RunRequestT = TypeVar('RunRequestT') """Type variable for request types.""" SourceEvent = AgentStreamEvent | AgentRunResultEvent @dataclass -class BaseEventStream(ABC, Generic[RequestT, EventT, AgentDepsT]): +class BaseEventStream(ABC, Generic[RunRequestT, EventT, AgentDepsT]): """TODO (DouwM): Docstring.""" - request: RequestT + request: RunRequestT result: AgentRunResult | None = None @@ -64,6 +64,26 @@ def new_message_id(self) -> str: self.message_id = str(uuid4()) return self.message_id + @abstractmethod + def encode_event(self, event: EventT, accept: str | None = None) -> str: + """Encode an event as a string. + + Args: + event: The event to encode. + accept: The accept header value for encoding format. 
+        """
+        raise NotImplementedError
+
+    async def encode_stream(self, stream: AsyncIterator[EventT], accept: str | None = None) -> AsyncIterator[str]:
+        """Encode a stream of events as SSE strings.
+
+        Args:
+            stream: The stream of events to encode.
+            accept: The accept header value for encoding format.
+        """
+        async for event in stream:
+            yield self.encode_event(event, accept)
+
     async def handle_stream(self, stream: AsyncIterator[SourceEvent]) -> AsyncIterator[EventT]:
         """Handle a stream of agent events.
 
diff --git a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/__init__.py b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/__init__.py
index e0788a1404..511bd68336 100644
--- a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/__init__.py
+++ b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/__init__.py
@@ -44,14 +44,11 @@
     ToolOutputAvailableChunk,
     ToolOutputErrorChunk,
 )
-from ._utils import VERCEL_AI_DSP_HEADERS, CamelBaseModel, JSONValue, ProviderMetadata
+from ._utils import ProviderMetadata
 
 __all__ = [
     # Utilities
-    'CamelBaseModel',
     'ProviderMetadata',
-    'JSONValue',
-    'VERCEL_AI_DSP_HEADERS',
     # Request types
     'RequestData',
     'TextUIPart',
diff --git a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_adapter.py b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_adapter.py
index 7cb064e8cb..382db45181 100644
--- a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_adapter.py
+++ b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_adapter.py
@@ -2,15 +2,11 @@
 
 from __future__ import annotations
 
-from collections.abc import Sequence
+from collections.abc import Mapping, Sequence
 from dataclasses import dataclass
 from functools import cached_property
-from typing import (
-    Any,
-)
 
-from ... import DeferredToolResults
-from ...agent import AbstractAgent, AgentDepsT
+from ...agent import AgentDepsT
 from ...messages import (
     ModelMessage,
     ModelRequest,
@@ -19,27 +15,23 @@
     TextPart,
     UserPromptPart,
 )
-from ...models import KnownModelName, Model
-from ...output import OutputSpec
-from ...settings import ModelSettings
-from ...toolsets import AbstractToolset
-from ...usage import RunUsage, UsageLimits
-from ..adapter import BaseAdapter, OnCompleteFunc
+from ..adapter import BaseAdapter
 from ..event_stream import BaseEventStream
 from ._event_stream import VercelAIEventStream
 from ._request_types import RequestData, TextUIPart, UIMessage, request_data_ta
 from ._response_types import BaseChunk
-from ._utils import VERCEL_AI_DSP_HEADERS
 
 try:
     from starlette.requests import Request
-    from starlette.responses import Response
 except ImportError as e:  # pragma: no cover
     raise ImportError(
         'Please install the `starlette` package to use `Agent.to_ag_ui()` method, '
         'you can use the `ag-ui` optional group — `pip install "pydantic-ai-slim[ag-ui]"`'
     ) from e
 
+# See https://ai-sdk.dev/docs/ai-sdk-ui/stream-protocol#data-stream-protocol
+VERCEL_AI_DSP_HEADERS = {'x-vercel-ai-ui-message-stream': 'v1'}
+
 __all__ = ['VercelAIAdapter']
 
 
@@ -48,75 +40,24 @@
 class VercelAIAdapter(BaseAdapter[RequestData, UIMessage, BaseChunk, AgentDepsT]):
     """TODO (DouweM): Docstring."""
 
-    def create_event_stream(self) -> BaseEventStream[RequestData, BaseChunk, AgentDepsT]:
-        return VercelAIEventStream(self.request)
-
-    def encode_event(self, event: BaseChunk, accept: str | None = None) -> str:
-        return f'data: {event.model_dump_json(by_alias=True, exclude_none=True)}\n\n'
-
     @classmethod
     async def validate_request(cls, request: Request) -> RequestData:
         """Validate a Vercel AI request."""
         return request_data_ta.validate_json(await request.body())
 
-    @classmethod
-    async def 
dispatch_request( - cls, - agent: AbstractAgent[AgentDepsT, Any], - request: Request, - *, - message_history: Sequence[ModelMessage] | None = None, - deferred_tool_results: DeferredToolResults | None = None, - model: Model | KnownModelName | str | None = None, - deps: AgentDepsT = None, - output_type: OutputSpec[Any] | None = None, - model_settings: ModelSettings | None = None, - usage_limits: UsageLimits | None = None, - usage: RunUsage | None = None, - infer_name: bool = True, - toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None, - on_complete: OnCompleteFunc | None = None, - ) -> Response: - """Handle an AG-UI request and return a streaming response. + def dump_messages(self, messages: Sequence[ModelMessage]) -> list[UIMessage]: + """Dump messages to the request and return the dumped messages.""" + # TODO (DouweM): implement + raise NotImplementedError - Args: - agent: The agent to run. - request: The incoming Starlette/FastAPI request. - - output_type: Custom output type to use for this run, `output_type` may only be used if the agent has no - output validators since output validators would expect an argument that matches the agent's output type. - message_history: History of the conversation so far. - deferred_tool_results: Optional results for deferred tool calls in the message history. - model: Optional model to use for this run, required if `model` was not set when creating the agent. - deps: Optional dependencies to use for this run. - model_settings: Optional settings to use for this model's request. - usage_limits: Optional limits on model request count or token usage. - usage: Optional usage to start with, useful for resuming a conversation or agents used in tools. - infer_name: Whether to try to infer the agent name from the call frame if it's not set. - toolsets: Optional additional toolsets for this run. - on_complete: Optional callback function called when the agent run completes successfully. - The callback receives the completed [`AgentRunResult`][pydantic_ai.agent.AgentRunResult] and can access `all_messages()` and other result data. + @cached_property + def event_stream(self) -> BaseEventStream[RequestData, BaseChunk, AgentDepsT]: + return VercelAIEventStream(self.request) - Returns: - A streaming Starlette response with AG-UI protocol events. - """ - response = await super().dispatch_request( - agent, - request, - message_history=message_history, - deferred_tool_results=deferred_tool_results, - model=model, - deps=deps, - output_type=output_type, - model_settings=model_settings, - usage_limits=usage_limits, - usage=usage, - infer_name=infer_name, - toolsets=toolsets, - on_complete=on_complete, - ) - response.headers.update(VERCEL_AI_DSP_HEADERS) - return response + @property + def response_headers(self) -> Mapping[str, str] | None: + """Get the response headers for the adapter.""" + return VERCEL_AI_DSP_HEADERS @cached_property def messages(self) -> list[ModelMessage]: @@ -131,9 +72,16 @@ def messages(self) -> list[ModelMessage]: Raises: ValueError: If message format is not supported. """ + return self.load_messages(self.request.messages) + + # TODO (DouweM): model, builtin_tools? 
+ + @classmethod + def load_messages(cls, messages: Sequence[UIMessage]) -> list[ModelMessage]: + """Load messages from the request and return the loaded messages.""" pai_messages: list[ModelMessage] = [] - for msg in self.request.messages: + for msg in messages: if msg.role == 'user': # User message - extract text from parts texts: list[str] = [] @@ -169,7 +117,3 @@ def messages(self) -> list[ModelMessage]: pai_messages.append(ModelRequest(parts=[SystemPromptPart(content='\n'.join(texts))])) return pai_messages - - # TODO (DouweM): model, builtin_tools? - - # TODO (DouweM): static load_messages, dump_messages diff --git a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_event_stream.py b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_event_stream.py index 9fc938a4d7..cd4dc0a097 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_event_stream.py +++ b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_event_stream.py @@ -58,6 +58,9 @@ def __init__(self, request: RequestData) -> None: super().__init__(request) self._final_result_tool_id: str | None = None + def encode_event(self, event: BaseChunk, accept: str | None = None) -> str: + return f'data: {event.model_dump_json(by_alias=True, exclude_none=True)}\n\n' + async def after_stream(self) -> AsyncIterator[BaseChunk]: """Yield events after agent streaming completes.""" # Close the final result tool if there was one diff --git a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_utils.py b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_utils.py index d3dfb4e8cd..b517a7c23d 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_utils.py +++ b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_utils.py @@ -10,10 +10,7 @@ from pydantic import BaseModel, ConfigDict from pydantic.alias_generators import to_camel -__all__ = ('CamelBaseModel', 'ProviderMetadata', 'JSONValue', 'VERCEL_AI_DSP_HEADERS') - -# See https://ai-sdk.dev/docs/ai-sdk-ui/stream-protocol#data-stream-protocol -VERCEL_AI_DSP_HEADERS = {'x-vercel-ai-ui-message-stream': 'v1'} +__all__ = ('CamelBaseModel', 'ProviderMetadata', 'JSONValue') # Technically this is recursive union of JSON types; for simplicity, we call it Any JSONValue = Any From 6f510535665ab11c45530f176e42dae8f224adf3 Mon Sep 17 00:00:00 2001 From: Douwe Maan Date: Fri, 10 Oct 2025 12:32:52 +0000 Subject: [PATCH 09/10] Refactor AG-UI streaming --- pydantic_ai_slim/pydantic_ai/ui/adapter.py | 54 ++-- .../pydantic_ai/ui/ag_ui/_adapter.py | 2 +- .../pydantic_ai/ui/ag_ui/_event_stream.py | 158 +++++------ .../pydantic_ai/ui/event_stream.py | 256 ++++++++++++++---- .../pydantic_ai/ui/vercel_ai/_adapter.py | 2 +- .../pydantic_ai/ui/vercel_ai/_event_stream.py | 9 +- tests/test_ag_ui.py | 12 +- 7 files changed, 316 insertions(+), 177 deletions(-) diff --git a/pydantic_ai_slim/pydantic_ai/ui/adapter.py b/pydantic_ai_slim/pydantic_ai/ui/adapter.py index a75d3d03da..7ff0f800ca 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/adapter.py +++ b/pydantic_ai_slim/pydantic_ai/ui/adapter.py @@ -138,7 +138,7 @@ def load_messages(cls, messages: Sequence[MessageT]) -> list[ModelMessage]: def dump_messages(self, messages: Sequence[ModelMessage]) -> list[MessageT]: """Dump messages to the request and return the dumped messages.""" - @cached_property + @property @abstractmethod def event_stream(self) -> BaseEventStream[RunRequestT, EventT, AgentDepsT]: """Create an event stream for the adapter.""" @@ -165,11 +165,6 @@ def raw_state(self) -> dict[str, Any] | None: """Get the state of the agent run.""" return None - @property - def result(self) -> AgentRunResult | None: - """Get 
the result of the agent run.""" - return self.event_stream.result - @property def response_headers(self) -> Mapping[str, str] | None: """Get the response headers for the adapter.""" @@ -283,6 +278,21 @@ async def run_stream( ): yield event + async def stream_response(self, stream: AsyncIterator[EventT], accept: str | None = None) -> Response: + """Stream a response to the client. + + Args: + stream: The stream of events to encode. + accept: The accept header value for encoding format. + """ + return StreamingResponse( + self.encode_stream( + stream, + accept=accept, + ), + headers=self.response_headers, + ) + @classmethod async def dispatch_request( cls, @@ -334,22 +344,18 @@ async def dispatch_request( adapter = cls(agent=agent, request=request_data) - return StreamingResponse( - adapter.encode_stream( - adapter.run_stream( - message_history=message_history, - deferred_tool_results=deferred_tool_results, - deps=deps, - output_type=output_type, - model=model, - model_settings=model_settings, - usage_limits=usage_limits, - usage=usage, - infer_name=infer_name, - toolsets=toolsets, - on_complete=on_complete, - ), - accept=request.headers.get('accept'), - ), - headers=adapter.response_headers, + run_stream = adapter.run_stream( + message_history=message_history, + deferred_tool_results=deferred_tool_results, + deps=deps, + output_type=output_type, + model=model, + model_settings=model_settings, + usage_limits=usage_limits, + usage=usage, + infer_name=infer_name, + toolsets=toolsets, + on_complete=on_complete, ) + + return await adapter.stream_response(run_stream, accept=request.headers.get('accept')) diff --git a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py index b4d6c98b98..8f8e23a6b2 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py +++ b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py @@ -105,7 +105,7 @@ def dump_messages(self, messages: Sequence[ModelMessage]) -> list[Message]: # TODO (DouweM): bring in from https://github.com/pydantic/pydantic-ai/pull/3068 raise NotImplementedError - @cached_property + @property def event_stream(self) -> BaseEventStream[RunAgentInput, BaseEvent, AgentDepsT]: """Create an event stream for the adapter.""" return AGUIEventStream(self.request) diff --git a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_event_stream.py b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_event_stream.py index 097f2f253f..7e5fbf23dd 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_event_stream.py +++ b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_event_stream.py @@ -13,9 +13,8 @@ from ...messages import ( BuiltinToolCallPart, BuiltinToolReturnPart, - FinalResultEvent, - FunctionToolCallEvent, FunctionToolResultEvent, + ModelResponsePart, TextPart, TextPartDelta, ThinkingPart, @@ -79,8 +78,7 @@ class AGUIEventStream(BaseEventStream[RunAgentInput, BaseEvent, AgentDepsT]): def __init__(self, request: RunAgentInput) -> None: """Initialize AG-UI event stream state.""" super().__init__(request) - self.part_end: BaseEvent | None = None - self.thinking: bool = False + self.thinking_text = False self.builtin_tool_call_ids: dict[str, str] = {} def encode_event(self, event: BaseEvent, accept: str | None = None) -> str: @@ -105,19 +103,6 @@ async def before_stream(self) -> AsyncIterator[BaseEvent]: async def after_stream(self) -> AsyncIterator[BaseEvent]: """Handle an AgentRunResultEvent, cleaning up any pending state.""" - # Emit any pending part end event - if self.part_end: # pragma: no branch - yield self.part_end - self.part_end = None 
- - # End thinking mode if still active - if self.thinking: - yield ThinkingEndEvent( - type=EventType.THINKING_END, - ) - self.thinking = False - - # Emit finish event yield RunFinishedEvent( thread_id=self.request.thread_id, run_id=self.request.run_id, @@ -125,84 +110,92 @@ async def after_stream(self) -> AsyncIterator[BaseEvent]: async def on_error(self, error: Exception) -> AsyncIterator[BaseEvent]: """Handle errors during streaming.""" - # Try to get code from exception if it has one, otherwise use class name - code = getattr(error, 'code', error.__class__.__name__) - yield RunErrorEvent(message=str(error), code=code) + yield RunErrorEvent(message=str(error)) - # Granular handlers implementation - - async def handle_text_start(self, part: TextPart) -> AsyncIterator[BaseEvent]: + async def handle_text_start( + self, part: TextPart, previous_part: ModelResponsePart | None = None + ) -> AsyncIterator[BaseEvent]: """Handle a TextPart at start.""" - if self.part_end: - yield self.part_end - self.part_end = None - - if self.thinking: - yield ThinkingEndEvent(type=EventType.THINKING_END) - self.thinking = False + if isinstance(previous_part, TextPart): + message_id = previous_part.message_id + else: + message_id = self.new_message_id() + yield TextMessageStartEvent(message_id=message_id) - message_id = self.new_message_id() - yield TextMessageStartEvent(message_id=message_id) if part.content: # pragma: no branch yield TextMessageContentEvent(message_id=message_id, delta=part.content) - self.part_end = TextMessageEndEvent(message_id=message_id) async def handle_text_delta(self, delta: TextPartDelta) -> AsyncIterator[BaseEvent]: """Handle a TextPartDelta.""" if delta.content_delta: # pragma: no branch yield TextMessageContentEvent(message_id=self.message_id, delta=delta.content_delta) - async def handle_thinking_start(self, part: ThinkingPart) -> AsyncIterator[BaseEvent]: - """Handle a ThinkingPart at start.""" - if self.part_end: - yield self.part_end - self.part_end = None + async def handle_text_end( + self, part: TextPart, next_part: ModelResponsePart | None = None + ) -> AsyncIterator[BaseEvent]: + """Handle a TextPart at end.""" + if not isinstance(next_part, TextPart): + yield TextMessageEndEvent(message_id=self.message_id) - if not self.thinking: + async def handle_thinking_start( + self, part: ThinkingPart, previous_part: ModelResponsePart | None = None + ) -> AsyncIterator[BaseEvent]: + """Handle a ThinkingPart at start.""" + if not isinstance(previous_part, ThinkingPart): yield ThinkingStartEvent(type=EventType.THINKING_START) - self.thinking = True if part.content: yield ThinkingTextMessageStartEvent(type=EventType.THINKING_TEXT_MESSAGE_START) yield ThinkingTextMessageContentEvent(type=EventType.THINKING_TEXT_MESSAGE_CONTENT, delta=part.content) - self.part_end = ThinkingTextMessageEndEvent(type=EventType.THINKING_TEXT_MESSAGE_END) + self.thinking_text = True async def handle_thinking_delta(self, delta: ThinkingPartDelta) -> AsyncIterator[BaseEvent]: """Handle a ThinkingPartDelta.""" - if delta.content_delta: # pragma: no branch - if not isinstance(self.part_end, ThinkingTextMessageEndEvent): - yield ThinkingTextMessageStartEvent(type=EventType.THINKING_TEXT_MESSAGE_START) - self.part_end = ThinkingTextMessageEndEvent(type=EventType.THINKING_TEXT_MESSAGE_END) + if not delta.content_delta: + return - yield ThinkingTextMessageContentEvent( - type=EventType.THINKING_TEXT_MESSAGE_CONTENT, delta=delta.content_delta - ) + if not self.thinking_text: + yield 
ThinkingTextMessageStartEvent(type=EventType.THINKING_TEXT_MESSAGE_START) + self.thinking_text = True - async def handle_tool_call_start(self, part: ToolCallPart | BuiltinToolCallPart) -> AsyncIterator[BaseEvent]: - """Handle a ToolCallPart or BuiltinToolCallPart at start.""" - if self.part_end: - yield self.part_end - self.part_end = None + yield ThinkingTextMessageContentEvent(type=EventType.THINKING_TEXT_MESSAGE_CONTENT, delta=delta.content_delta) - if self.thinking: + async def handle_thinking_end( + self, part: ThinkingPart, next_part: ModelResponsePart | None = None + ) -> AsyncIterator[BaseEvent]: + """Handle a ThinkingPart at end.""" + if self.thinking_text: + yield ThinkingTextMessageEndEvent(type=EventType.THINKING_TEXT_MESSAGE_END) + self.thinking_text = False + + if not isinstance(next_part, ThinkingPart): yield ThinkingEndEvent(type=EventType.THINKING_END) - self.thinking = False + async def handle_tool_call_start(self, part: ToolCallPart | BuiltinToolCallPart) -> AsyncIterator[BaseEvent]: + """Handle a ToolCallPart or BuiltinToolCallPart at start.""" + async for e in self._handle_tool_call_start(part): + yield e + + async def handle_builtin_tool_call_start(self, part: BuiltinToolCallPart) -> AsyncIterator[BaseEvent]: + """Handle a BuiltinToolCallPart at start.""" tool_call_id = part.tool_call_id - if isinstance(part, BuiltinToolCallPart): - builtin_tool_call_id = '|'.join([BUILTIN_TOOL_CALL_ID_PREFIX, part.provider_name or '', tool_call_id]) - self.builtin_tool_call_ids[tool_call_id] = builtin_tool_call_id - tool_call_id = builtin_tool_call_id + builtin_tool_call_id = '|'.join([BUILTIN_TOOL_CALL_ID_PREFIX, part.provider_name or '', tool_call_id]) + self.builtin_tool_call_ids[tool_call_id] = builtin_tool_call_id + tool_call_id = builtin_tool_call_id + async for e in self._handle_tool_call_start(part, tool_call_id): + yield e + + async def _handle_tool_call_start( + self, part: ToolCallPart | BuiltinToolCallPart, tool_call_id: str | None = None + ) -> AsyncIterator[BaseEvent]: + """Handle a ToolCallPart or BuiltinToolCallPart at start.""" + tool_call_id = tool_call_id or part.tool_call_id message_id = self.message_id or self.new_message_id() + yield ToolCallStartEvent(tool_call_id=tool_call_id, tool_call_name=part.tool_name, parent_message_id=message_id) if part.args: yield ToolCallArgsEvent(tool_call_id=tool_call_id, delta=part.args_as_json_str()) - self.part_end = ToolCallEndEvent(tool_call_id=tool_call_id) - - def handle_builtin_tool_call_start(self, part: BuiltinToolCallPart) -> AsyncIterator[BaseEvent]: - """Handle a BuiltinToolCallPart at start.""" - return self.handle_tool_call_start(part) async def handle_tool_call_delta(self, delta: ToolCallPartDelta) -> AsyncIterator[BaseEvent]: """Handle a ToolCallPartDelta.""" @@ -215,13 +208,16 @@ async def handle_tool_call_delta(self, delta: ToolCallPartDelta) -> AsyncIterato delta=delta.args_delta if isinstance(delta.args_delta, str) else json.dumps(delta.args_delta), ) + async def handle_tool_call_end(self, part: ToolCallPart) -> AsyncIterator[BaseEvent]: + """Handle a ToolCallPart at end.""" + yield ToolCallEndEvent(tool_call_id=part.tool_call_id) + + async def handle_builtin_tool_call_end(self, part: BuiltinToolCallPart) -> AsyncIterator[BaseEvent]: + """Handle a BuiltinToolCallPart at end.""" + yield ToolCallEndEvent(tool_call_id=self.builtin_tool_call_ids[part.tool_call_id]) + async def handle_builtin_tool_return(self, part: BuiltinToolReturnPart) -> AsyncIterator[BaseEvent]: """Handle a BuiltinToolReturnPart.""" - # Emit 
any pending part_end event (e.g., TOOL_CALL_END) before the result
-        if self.part_end:
-            yield self.part_end
-            self.part_end = None
-
         tool_call_id = self.builtin_tool_call_ids[part.tool_call_id]
         yield ToolCallResultEvent(
             message_id=self.new_message_id(),
@@ -231,26 +227,13 @@ def handle_builtin_tool_return(self, part: BuiltinToolReturnPart) -> Async
             content=part.model_response_str(),
         )
 
-    async def handle_function_tool_call(self, event: FunctionToolCallEvent) -> AsyncIterator[BaseEvent]:
-        """Handle a FunctionToolCallEvent.
-
-        This event is emitted when a function tool is called, but no AG-UI events
-        are needed at this stage since tool calls are handled in PartStartEvent.
-        """
-        return
-        yield  # Make this an async generator
-
     async def handle_function_tool_result(self, event: FunctionToolResultEvent) -> AsyncIterator[BaseEvent]:
         """Handle a FunctionToolResultEvent, emitting tool result events."""
         result = event.result
         if not isinstance(result, ToolReturnPart):
+            # TODO (DouweM): Stream RetryPromptParts to the frontend as well?
             return
 
-        # Emit any pending part_end event (e.g., TOOL_CALL_END) before the result
-        if self.part_end:
-            yield self.part_end
-            self.part_end = None
-
         yield ToolCallResultEvent(
             message_id=self.new_message_id(),
             type=EventType.TOOL_CALL_RESULT,
@@ -271,11 +254,4 @@ async def handle_function_tool_result(self, event: FunctionToolResultEvent) -> A
         if isinstance(item, BaseEvent):  # pragma: no branch
             yield item
 
-    async def handle_final_result(self, event: FinalResultEvent) -> AsyncIterator[BaseEvent]:
-        """Handle a FinalResultEvent.
-
-        This event is emitted when the agent produces a final result, but no AG-UI events
-        are needed at this stage.
-        """
-        return
-        yield  # Make this an async generator
+    # TODO (DouweM): Stream ToolCallResultEvent.content as user parts?
diff --git a/pydantic_ai_slim/pydantic_ai/ui/event_stream.py b/pydantic_ai_slim/pydantic_ai/ui/event_stream.py
index 68d77342dc..92ef862894 100644
--- a/pydantic_ai_slim/pydantic_ai/ui/event_stream.py
+++ b/pydantic_ai_slim/pydantic_ai/ui/event_stream.py
@@ -9,7 +9,7 @@
 from abc import ABC, abstractmethod
 from collections.abc import AsyncIterator
 from dataclasses import dataclass, field
-from typing import Generic, TypeVar
+from typing import Generic, Literal, TypeVar
 from uuid import uuid4
 
 from ..messages import (
@@ -20,6 +20,7 @@
     FinalResultEvent,
     FunctionToolCallEvent,
     FunctionToolResultEvent,
+    ModelResponsePart,
     PartDeltaEvent,
     PartStartEvent,
     TextPart,
@@ -50,7 +51,6 @@ class BaseEventStream(ABC, Generic[RunRequestT, EventT, AgentDepsT]):
     """TODO (DouweM): Docstring."""
 
     request: RunRequestT
-
     result: AgentRunResult | None = None
 
     message_id: str = field(default_factory=lambda: str(uuid4()))
 
@@ -84,7 +84,7 @@ async def encode_stream(self, stream: AsyncIterator[EventT], accept: str | None
         async for event in stream:
             yield self.encode_event(event, accept)
 
-    async def handle_stream(self, stream: AsyncIterator[SourceEvent]) -> AsyncIterator[EventT]:
+    async def handle_stream(self, stream: AsyncIterator[SourceEvent]) -> AsyncIterator[EventT]:  # noqa: C901
         """Handle a stream of agent events.
 
         Args:
@@ -96,9 +96,49 @@ async def handle_stream(self, stream: AsyncIterator[SourceEvent]) -> AsyncIterat
         async for e in self.before_stream():
             yield e
 
+        part: ModelResponsePart | None = None
+        turn: Literal['request', 'response'] | None = None
         try:
             async for event in stream:
-                async for e in self.handle_event(event):
+                # TODO (DouweM): Should this be a TurnChange event? 
+ previous_part = part + next_turn = turn + next_part = part + if isinstance(event, PartStartEvent): + next_turn = 'request' + next_part = event.part + elif isinstance(event, FunctionToolCallEvent): + next_turn = 'response' + next_part = None + elif isinstance(event, AgentRunResultEvent): + next_turn = None + next_part = None + + if next_part != part: + if part: + async for e in self.handle_part_end(part, next_part): + yield e + + part = next_part + + if turn != next_turn: + if turn == 'request': + async for e in self.after_request(): + yield e + elif turn == 'response': + async for e in self.after_response(): + yield e + + turn = next_turn + + if turn == 'request': + async for e in self.before_request(): + yield e + elif turn == 'response': + async for e in self.before_response(): + yield e + + async for e in self.handle_event(event, previous_part): yield e except Exception as e: async for e in self.on_error(e): @@ -107,7 +147,9 @@ async def handle_stream(self, stream: AsyncIterator[SourceEvent]) -> AsyncIterat async for e in self.after_stream(): yield e - async def handle_event(self, event: SourceEvent) -> AsyncIterator[EventT]: # noqa: C901 + async def handle_event( + self, event: SourceEvent, previous_part: ModelResponsePart | None = None + ) -> AsyncIterator[EventT]: """Transform a Pydantic AI agent event into protocol-specific events. This method dispatches to specific `handle_*` methods based on event and part type. @@ -115,6 +157,7 @@ async def handle_event(self, event: SourceEvent) -> AsyncIterator[EventT]: # no Args: event: The Pydantic AI agent event to transform. + previous_part: The previous part. Yields: Protocol-specific events. @@ -123,48 +166,21 @@ async def handle_event(self, event: SourceEvent) -> AsyncIterator[EventT]: # no yield e match event: - case PartStartEvent(part=part): - # Dispatch based on part type - match part: - case TextPart(): - async for e in self.handle_text_start(part): - yield e - case ThinkingPart(): - async for e in self.handle_thinking_start(part): - yield e - case ToolCallPart(): - async for e in self.handle_tool_call_start(part): - yield e - case BuiltinToolCallPart(): - async for e in self.handle_builtin_tool_call_start(part): - yield e - case BuiltinToolReturnPart(): - async for e in self.handle_builtin_tool_return(part): - yield e - case FilePart(): - # FilePart is not currently handled by UI protocols - pass - case PartDeltaEvent(delta=delta): - # Dispatch based on delta type - match delta: - case TextPartDelta(): - async for e in self.handle_text_delta(delta): - yield e - case ThinkingPartDelta(): - async for e in self.handle_thinking_delta(delta): - yield e - case ToolCallPartDelta(): - async for e in self.handle_tool_call_delta(delta): - yield e + case PartStartEvent(): + async for e in self.handle_part_start(event, previous_part): + yield e + case PartDeltaEvent(): + async for e in self.handle_part_delta(event): + yield e + case FinalResultEvent(): + async for e in self.handle_final_result(event): + yield e case FunctionToolCallEvent(): async for e in self.handle_function_tool_call(event): yield e case FunctionToolResultEvent(): async for e in self.handle_function_tool_result(event): yield e - case FinalResultEvent(): - async for e in self.handle_final_result(event): - yield e case AgentRunResultEvent(): self.result = event.result async for e in self.handle_run_result(event): @@ -175,11 +191,86 @@ async def handle_event(self, event: SourceEvent) -> AsyncIterator[EventT]: # no async for e in self.after_event(event): yield e - async def 
handle_text_start(self, part: TextPart) -> AsyncIterator[EventT]: + async def handle_part_start( + self, event: PartStartEvent, previous_part: ModelResponsePart | None = None + ) -> AsyncIterator[EventT]: + """Handle a PartStartEvent. + + Args: + event: The PartStartEvent. + previous_part: The previous part. + """ + part = event.part + match part: + case TextPart(): + async for e in self.handle_text_start(part, previous_part): + yield e + case ThinkingPart(): + async for e in self.handle_thinking_start(part, previous_part): + yield e + case ToolCallPart(): + async for e in self.handle_tool_call_start(part): + yield e + case BuiltinToolCallPart(): + async for e in self.handle_builtin_tool_call_start(part): + yield e + case BuiltinToolReturnPart(): + async for e in self.handle_builtin_tool_return(part): + yield e + case FilePart(): + async for e in self.handle_file(part): + yield e + + async def handle_part_delta(self, event: PartDeltaEvent) -> AsyncIterator[EventT]: + """Handle a PartDeltaEvent. + + Args: + event: The PartDeltaEvent. + """ + delta = event.delta + match delta: + case TextPartDelta(): + async for e in self.handle_text_delta(delta): + yield e + case ThinkingPartDelta(): + async for e in self.handle_thinking_delta(delta): + yield e + case ToolCallPartDelta(): + async for e in self.handle_tool_call_delta(delta): + yield e + + async def handle_part_end( + self, part: ModelResponsePart, next_part: ModelResponsePart | None = None + ) -> AsyncIterator[EventT]: + """Handle the end of a part. + + Args: + part: The part that ended. + next_part: The new part that started. + """ + # TODO (DouweM): Make this a proper event. Figure out a proper way to do context manager style wrapping + match part: + case TextPart(): + async for e in self.handle_text_end(part, next_part): + yield e + case ThinkingPart(): + async for e in self.handle_thinking_end(part, next_part): + yield e + case ToolCallPart(): + async for e in self.handle_tool_call_end(part): + yield e + case BuiltinToolCallPart(): + async for e in self.handle_builtin_tool_call_end(part): + yield e + + async def handle_text_start( + self, part: TextPart, previous_part: ModelResponsePart | None = None + ) -> AsyncIterator[EventT]: """Handle a TextPart at start. Args: part: The TextPart. + previous_part: The previous part. Yields: Protocol-specific events. @@ -199,11 +290,14 @@ async def handle_text_delta(self, delta: TextPartDelta) -> AsyncIterator[EventT] return yield # Make this an async generator - async def handle_thinking_start(self, part: ThinkingPart) -> AsyncIterator[EventT]: + async def handle_thinking_start( + self, part: ThinkingPart, previous_part: ModelResponsePart | None = None + ) -> AsyncIterator[EventT]: """Handle a ThinkingPart at start. Args: part: The ThinkingPart. + previous_part: The previous part. Yields: Protocol-specific events. @@ -271,11 +365,11 @@ async def handle_builtin_tool_return(self, part: BuiltinToolReturnPart) -> Async return yield # Make this an async generator - async def handle_function_tool_call(self, event: FunctionToolCallEvent) -> AsyncIterator[EventT]: - """Handle a FunctionToolCallEvent. + async def handle_file(self, part: FilePart) -> AsyncIterator[EventT]: + """Handle a FilePart. Args: - event: The function tool call event. + part: The FilePart. Yields: Protocol-specific events. 
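The hunk above fills out the granular hook surface: `handle_part_start`/`handle_part_delta`/`handle_part_end` dispatch on part type, and every `handle_*` hook defaults to a no-op. As a reading aid — not part of this patch — here is a minimal sketch of a custom protocol built on those hooks, assuming only what the diff shows (the `BaseEventStream` generic parameters, the abstract `encode_event`, and the `previous_part`-style signatures as of this commit). `PlainTextRequest` and the bare SSE framing are invented for illustration:

```python
from __future__ import annotations

from collections.abc import AsyncIterator
from dataclasses import dataclass

from pydantic_ai.messages import ModelResponsePart, TextPart, TextPartDelta
from pydantic_ai.ui import BaseEventStream


@dataclass
class PlainTextRequest:
    """Stand-in request type, invented for this example."""

    prompt: str


class PlainTextEventStream(BaseEventStream[PlainTextRequest, str, None]):
    """Streams only text content; every other hook keeps its no-op default."""

    def encode_event(self, event: str, accept: str | None = None) -> str:
        # Frame each text chunk as a bare SSE `data:` line.
        return f'data: {event}\n\n'

    async def handle_text_start(
        self, part: TextPart, previous_part: ModelResponsePart | None = None
    ) -> AsyncIterator[str]:
        if part.content:
            yield part.content

    async def handle_text_delta(self, delta: TextPartDelta) -> AsyncIterator[str]:
        if delta.content_delta:
            yield delta.content_delta
```

Driving `handle_stream()` with agent events then yields plain `data:` lines, while thinking and tool-call traffic falls through the no-op defaults shown in this file.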
@@ -283,11 +377,35 @@ async def handle_function_tool_call(self, event: FunctionToolCallEvent) -> Async
         return
         yield  # Make this an async generator
 
-    async def handle_function_tool_result(self, event: FunctionToolResultEvent) -> AsyncIterator[EventT]:
-        """Handle a FunctionToolResultEvent.
+    async def handle_text_end(
+        self, part: TextPart, next_part: ModelResponsePart | None = None
+    ) -> AsyncIterator[EventT]:
+        """Handle the end of a TextPart."""
+        return
+        yield  # Make this an async generator
+
+    async def handle_thinking_end(
+        self, part: ThinkingPart, next_part: ModelResponsePart | None = None
+    ) -> AsyncIterator[EventT]:
+        """Handle the end of a ThinkingPart."""
+        return
+        yield  # Make this an async generator
+
+    async def handle_tool_call_end(self, part: ToolCallPart) -> AsyncIterator[EventT]:
+        """Handle the end of a ToolCallPart."""
+        return
+        yield  # Make this an async generator
+
+    async def handle_builtin_tool_call_end(self, part: BuiltinToolCallPart) -> AsyncIterator[EventT]:
+        """Handle the end of a BuiltinToolCallPart."""
+        return
+        yield  # Make this an async generator
+
+    async def handle_final_result(self, event: FinalResultEvent) -> AsyncIterator[EventT]:
+        """Handle a FinalResultEvent.
 
         Args:
-            event: The function tool result event.
+            event: The final result event.
 
         Yields:
             Protocol-specific events.
@@ -295,11 +413,23 @@ async def handle_function_tool_result(self, event: FunctionToolResultEvent) -> A
         return
         yield  # Make this an async generator
 
-    async def handle_final_result(self, event: FinalResultEvent) -> AsyncIterator[EventT]:
-        """Handle a FinalResultEvent.
+    async def handle_function_tool_call(self, event: FunctionToolCallEvent) -> AsyncIterator[EventT]:
+        """Handle a FunctionToolCallEvent.
 
         Args:
-            event: The final result event.
+            event: The function tool call event.
+
+        Yields:
+            Protocol-specific events.
+        """
+        return
+        yield  # Make this an async generator
+
+    async def handle_function_tool_result(self, event: FunctionToolResultEvent) -> AsyncIterator[EventT]:
+        """Handle a FunctionToolResultEvent.
+
+        Args:
+            event: The function tool result event.
 
         Yields:
             Protocol-specific events.
@@ -319,6 +449,8 @@ async def handle_run_result(self, event: AgentRunResultEvent) -> AsyncIterator[E
         return
         yield  # Make this an async generator
 
+    # Lifecycle hooks (optional overrides)
+
     async def before_event(self, event: SourceEvent) -> AsyncIterator[EventT]:
         """Handle an event before it is processed.
 
@@ -337,7 +469,25 @@ async def after_event(self, event: SourceEvent) -> AsyncIterator[EventT]:
         return
         yield  # Make this an async generator
 
-    # Lifecycle hooks (optional overrides)
+    async def before_request(self) -> AsyncIterator[EventT]:
+        """Handle a request before it is processed."""
+        return
+        yield  # Make this an async generator
+
+    async def after_request(self) -> AsyncIterator[EventT]:
+        """Handle a request after it is processed."""
+        return
+        yield  # Make this an async generator
+
+    async def before_response(self) -> AsyncIterator[EventT]:
+        """Handle a response before it is processed."""
+        return
+        yield  # Make this an async generator
+
+    async def after_response(self) -> AsyncIterator[EventT]:
+        """Handle a response after it is processed."""
+        return
+        yield  # Make this an async generator
 
     async def before_stream(self) -> AsyncIterator[EventT]:
         """Yield events before agent streaming starts. 
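Taken together with the turn-tracking added to `handle_stream` earlier in this commit, these hooks fire in a predictable order. The trace below is a sketch derived by reading that logic — not captured output — for a run that streams one text part, calls one tool, and finishes:

```python
# Hook order for: one streamed text part -> one function tool call -> finish
# (illustrative; derived from the handle_stream() turn logic in this commit)
#
# before_stream()
# before_request()                   # first PartStartEvent sets turn = 'request'
# handle_text_start(part)
# handle_text_delta(delta)           # repeated for each TextPartDelta
# handle_part_end(text_part)         # FunctionToolCallEvent closes the open part...
# after_request()                    # ...and flips the turn to 'response'
# before_response()
# handle_function_tool_call(event)
# handle_function_tool_result(event)
# after_response()                   # AgentRunResultEvent clears the turn
# handle_run_result(event)
# after_stream()
```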
diff --git a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_adapter.py b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_adapter.py index 382db45181..dcb1c404ef 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_adapter.py +++ b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_adapter.py @@ -50,7 +50,7 @@ def dump_messages(self, messages: Sequence[ModelMessage]) -> list[UIMessage]: # TODO (DouweM): implement raise NotImplementedError - @cached_property + @property def event_stream(self) -> BaseEventStream[RequestData, BaseChunk, AgentDepsT]: return VercelAIEventStream(self.request) diff --git a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_event_stream.py b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_event_stream.py index cd4dc0a097..c0cb11c015 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_event_stream.py +++ b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_event_stream.py @@ -17,6 +17,7 @@ FinalResultEvent, FunctionToolCallEvent, FunctionToolResultEvent, + ModelResponsePart, RetryPromptPart, TextPart, TextPartDelta, @@ -74,7 +75,9 @@ async def on_error(self, error: Exception) -> AsyncIterator[BaseChunk]: # Granular handlers implementation - async def handle_text_start(self, part: TextPart) -> AsyncIterator[BaseChunk]: + async def handle_text_start( + self, part: TextPart, previous_part: ModelResponsePart | None = None + ) -> AsyncIterator[BaseChunk]: """Handle a TextPart at start.""" yield TextStartChunk(id=self.message_id or self.new_message_id()) if part.content: @@ -85,7 +88,9 @@ async def handle_text_delta(self, delta: TextPartDelta) -> AsyncIterator[BaseChu if delta.content_delta: yield TextDeltaChunk(id=self.message_id, delta=delta.content_delta) - async def handle_thinking_start(self, part: ThinkingPart) -> AsyncIterator[BaseChunk]: + async def handle_thinking_start( + self, part: ThinkingPart, previous_part: ModelResponsePart | None = None + ) -> AsyncIterator[BaseChunk]: """Handle a ThinkingPart at start.""" if not self.message_id: self.new_message_id() diff --git a/tests/test_ag_ui.py b/tests/test_ag_ui.py index ea8962abda..d7e5bd7573 100644 --- a/tests/test_ag_ui.py +++ b/tests/test_ag_ui.py @@ -255,11 +255,7 @@ async def stream_function( 'threadId': IsStr(), 'runId': IsStr(), }, - { - 'type': 'RUN_ERROR', - 'message': 'No message history, user prompt, or instructions provided', - 'code': 'UserError', - }, + {'type': 'RUN_ERROR', 'message': 'No message history, user prompt, or instructions provided'}, ] ) @@ -887,6 +883,7 @@ async def stream_function( yield {0: DeltaThinkingPart(content='')} yield "Let's do some thinking" yield '' + yield ' and some more' yield {1: DeltaThinkingPart(content='Thinking ')} yield {1: DeltaThinkingPart(content='about the weather')} yield {2: DeltaThinkingPart(content='')} @@ -922,6 +919,11 @@ async def stream_function( 'messageId': message_id, 'delta': "Let's do some thinking", }, + { + 'type': 'TEXT_MESSAGE_CONTENT', + 'messageId': message_id, + 'delta': ' and some more', + }, {'type': 'TEXT_MESSAGE_END', 'messageId': message_id}, {'type': 'THINKING_START'}, {'type': 'THINKING_TEXT_MESSAGE_START'}, From 013c43bcd57a8025d7c7b03de90231414b852064 Mon Sep 17 00:00:00 2001 From: Douwe Maan Date: Fri, 10 Oct 2025 13:30:07 +0000 Subject: [PATCH 10/10] Start fixing up Vercel events --- .../pydantic_ai/ui/ag_ui/_event_stream.py | 93 +++++------ .../pydantic_ai/ui/event_stream.py | 31 ++-- .../pydantic_ai/ui/vercel_ai/_event_stream.py | 152 +++++++++++------- tests/test_ag_ui.py | 11 ++ tests/test_vercel_ai.py | 10 ++ 5 files changed, 169 insertions(+), 128 
deletions(-) diff --git a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_event_stream.py b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_event_stream.py index 7e5fbf23dd..92b563e53c 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_event_stream.py +++ b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_event_stream.py @@ -8,13 +8,13 @@ import json from collections.abc import AsyncIterator, Iterable -from typing import TYPE_CHECKING, Final +from typing import Final from ...messages import ( BuiltinToolCallPart, BuiltinToolReturnPart, FunctionToolResultEvent, - ModelResponsePart, + RetryPromptPart, TextPart, TextPartDelta, ThinkingPart, @@ -26,9 +26,6 @@ from ...tools import AgentDepsT from .. import BaseEventStream -if TYPE_CHECKING: - pass # Agent type is not actually used in this module - try: from ag_ui.core import ( BaseEvent, @@ -78,8 +75,9 @@ class AGUIEventStream(BaseEventStream[RunAgentInput, BaseEvent, AgentDepsT]): def __init__(self, request: RunAgentInput) -> None: """Initialize AG-UI event stream state.""" super().__init__(request) - self.thinking_text = False - self.builtin_tool_call_ids: dict[str, str] = {} + self._thinking_text = False + self._builtin_tool_call_ids: dict[str, str] = {} + self._final_result_tool_id: str | None = None def encode_event(self, event: BaseEvent, accept: str | None = None) -> str: """Encode an AG-UI event as SSE. @@ -112,12 +110,10 @@ async def on_error(self, error: Exception) -> AsyncIterator[BaseEvent]: """Handle errors during streaming.""" yield RunErrorEvent(message=str(error)) - async def handle_text_start( - self, part: TextPart, previous_part: ModelResponsePart | None = None - ) -> AsyncIterator[BaseEvent]: + async def handle_text_start(self, part: TextPart, follows_text: bool = False) -> AsyncIterator[BaseEvent]: """Handle a TextPart at start.""" - if isinstance(previous_part, TextPart): - message_id = previous_part.message_id + if follows_text: + message_id = self.message_id else: message_id = self.new_message_id() yield TextMessageStartEvent(message_id=message_id) @@ -130,61 +126,57 @@ async def handle_text_delta(self, delta: TextPartDelta) -> AsyncIterator[BaseEve if delta.content_delta: # pragma: no branch yield TextMessageContentEvent(message_id=self.message_id, delta=delta.content_delta) - async def handle_text_end( - self, part: TextPart, next_part: ModelResponsePart | None = None - ) -> AsyncIterator[BaseEvent]: + async def handle_text_end(self, part: TextPart, followed_by_text: bool = False) -> AsyncIterator[BaseEvent]: """Handle a TextPart at end.""" - if not isinstance(next_part, TextPart): + if not followed_by_text: yield TextMessageEndEvent(message_id=self.message_id) async def handle_thinking_start( - self, part: ThinkingPart, previous_part: ModelResponsePart | None = None + self, part: ThinkingPart, follows_thinking: bool = False ) -> AsyncIterator[BaseEvent]: """Handle a ThinkingPart at start.""" - if not isinstance(previous_part, ThinkingPart): + if not follows_thinking: yield ThinkingStartEvent(type=EventType.THINKING_START) if part.content: yield ThinkingTextMessageStartEvent(type=EventType.THINKING_TEXT_MESSAGE_START) yield ThinkingTextMessageContentEvent(type=EventType.THINKING_TEXT_MESSAGE_CONTENT, delta=part.content) - self.thinking_text = True + self._thinking_text = True async def handle_thinking_delta(self, delta: ThinkingPartDelta) -> AsyncIterator[BaseEvent]: """Handle a ThinkingPartDelta.""" if not delta.content_delta: return - if not self.thinking_text: + if not self._thinking_text: yield 
ThinkingTextMessageStartEvent(type=EventType.THINKING_TEXT_MESSAGE_START) - self.thinking_text = True + self._thinking_text = True yield ThinkingTextMessageContentEvent(type=EventType.THINKING_TEXT_MESSAGE_CONTENT, delta=delta.content_delta) async def handle_thinking_end( - self, part: ThinkingPart, next_part: ModelResponsePart | None = None + self, part: ThinkingPart, followed_by_thinking: bool = False ) -> AsyncIterator[BaseEvent]: """Handle a ThinkingPart at end.""" - if self.thinking_text: + if self._thinking_text: yield ThinkingTextMessageEndEvent(type=EventType.THINKING_TEXT_MESSAGE_END) - self.thinking_text = False + self._thinking_text = False - if not isinstance(next_part, ThinkingPart): + if not followed_by_thinking: yield ThinkingEndEvent(type=EventType.THINKING_END) - async def handle_tool_call_start(self, part: ToolCallPart | BuiltinToolCallPart) -> AsyncIterator[BaseEvent]: + def handle_tool_call_start(self, part: ToolCallPart | BuiltinToolCallPart) -> AsyncIterator[BaseEvent]: """Handle a ToolCallPart or BuiltinToolCallPart at start.""" - async for e in self._handle_tool_call_start(part): - yield e + return self._handle_tool_call_start(part) - async def handle_builtin_tool_call_start(self, part: BuiltinToolCallPart) -> AsyncIterator[BaseEvent]: + def handle_builtin_tool_call_start(self, part: BuiltinToolCallPart) -> AsyncIterator[BaseEvent]: """Handle a BuiltinToolCallPart at start.""" tool_call_id = part.tool_call_id builtin_tool_call_id = '|'.join([BUILTIN_TOOL_CALL_ID_PREFIX, part.provider_name or '', tool_call_id]) - self.builtin_tool_call_ids[tool_call_id] = builtin_tool_call_id + self._builtin_tool_call_ids[tool_call_id] = builtin_tool_call_id tool_call_id = builtin_tool_call_id - async for e in self._handle_tool_call_start(part, tool_call_id): - yield e + return self._handle_tool_call_start(part, tool_call_id) async def _handle_tool_call_start( self, part: ToolCallPart | BuiltinToolCallPart, tool_call_id: str | None = None @@ -201,8 +193,8 @@ async def handle_tool_call_delta(self, delta: ToolCallPartDelta) -> AsyncIterato """Handle a ToolCallPartDelta.""" tool_call_id = delta.tool_call_id assert tool_call_id, '`ToolCallPartDelta.tool_call_id` must be set' - if tool_call_id in self.builtin_tool_call_ids: - tool_call_id = self.builtin_tool_call_ids[tool_call_id] + if tool_call_id in self._builtin_tool_call_ids: + tool_call_id = self._builtin_tool_call_ids[tool_call_id] yield ToolCallArgsEvent( tool_call_id=tool_call_id, delta=delta.args_delta if isinstance(delta.args_delta, str) else json.dumps(delta.args_delta), @@ -214,11 +206,11 @@ async def handle_tool_call_end(self, part: ToolCallPart) -> AsyncIterator[BaseEv async def handle_builtin_tool_call_end(self, part: BuiltinToolCallPart) -> AsyncIterator[BaseEvent]: """Handle a BuiltinToolCallPart at end.""" - yield ToolCallEndEvent(tool_call_id=self.builtin_tool_call_ids[part.tool_call_id]) + yield ToolCallEndEvent(tool_call_id=self._builtin_tool_call_ids[part.tool_call_id]) async def handle_builtin_tool_return(self, part: BuiltinToolReturnPart) -> AsyncIterator[BaseEvent]: """Handle a BuiltinToolReturnPart.""" - tool_call_id = self.builtin_tool_call_ids[part.tool_call_id] + tool_call_id = self._builtin_tool_call_ids[part.tool_call_id] yield ToolCallResultEvent( message_id=self.new_message_id(), type=EventType.TOOL_CALL_RESULT, @@ -230,28 +222,27 @@ async def handle_builtin_tool_return(self, part: BuiltinToolReturnPart) -> Async async def handle_function_tool_result(self, event: FunctionToolResultEvent) -> 
AsyncIterator[BaseEvent]: """Handle a FunctionToolResultEvent, emitting tool result events.""" result = event.result - if not isinstance(result, ToolReturnPart): - # TODO (DouweM): Stream RetryPromptParts to the frontend as well? - return + output = result.model_response() if isinstance(result, RetryPromptPart) else result.model_response_str() yield ToolCallResultEvent( message_id=self.new_message_id(), type=EventType.TOOL_CALL_RESULT, role='tool', tool_call_id=result.tool_call_id, - content=result.model_response_str(), + content=output, ) - # Check for AG-UI events returned by tool calls. - possible_event = result.metadata or result.content - if isinstance(possible_event, BaseEvent): - yield possible_event - elif isinstance(possible_event, str | bytes): # pragma: no branch - # Avoid iterable check for strings and bytes. - pass - elif isinstance(possible_event, Iterable): # pragma: no branch - for item in possible_event: # type: ignore[reportUnknownMemberType] - if isinstance(item, BaseEvent): # pragma: no branch - yield item + if isinstance(result, ToolReturnPart): + # Check for AG-UI events returned by tool calls. + possible_event = result.metadata or result.content + if isinstance(possible_event, BaseEvent): + yield possible_event + elif isinstance(possible_event, str | bytes): # pragma: no branch + # Avoid iterable check for strings and bytes. + pass + elif isinstance(possible_event, Iterable): # pragma: no branch + for item in possible_event: # type: ignore[reportUnknownMemberType] + if isinstance(item, BaseEvent): # pragma: no branch + yield item # TODO (DouweM): Stream ToolCallResultEvent.content as user parts? diff --git a/pydantic_ai_slim/pydantic_ai/ui/event_stream.py b/pydantic_ai_slim/pydantic_ai/ui/event_stream.py index 92ef862894..b9bafef105 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/event_stream.py +++ b/pydantic_ai_slim/pydantic_ai/ui/event_stream.py @@ -203,10 +203,12 @@ async def handle_part_start( part = event.part match part: case TextPart(): - async for e in self.handle_text_start(part, previous_part): + async for e in self.handle_text_start(part, follows_text=isinstance(previous_part, TextPart)): yield e case ThinkingPart(): - async for e in self.handle_thinking_start(part, previous_part): + async for e in self.handle_thinking_start( + part, follows_thinking=isinstance(previous_part, ThinkingPart) + ): yield e case ToolCallPart(): async for e in self.handle_tool_call_start(part): @@ -251,10 +253,10 @@ async def handle_part_end( # TODO (DouweM): Make this a proper event. Figure out a proper way to do context manager style wrapping match part: case TextPart(): - async for e in self.handle_text_end(part, next_part): + async for e in self.handle_text_end(part, followed_by_text=isinstance(next_part, TextPart)): yield e case ThinkingPart(): - async for e in self.handle_thinking_end(part, next_part): + async for e in self.handle_thinking_end(part, followed_by_thinking=isinstance(next_part, ThinkingPart)): yield e case ToolCallPart(): async for e in self.handle_tool_call_end(part): @@ -262,15 +264,16 @@ async def handle_part_end( case BuiltinToolCallPart(): async for e in self.handle_builtin_tool_call_end(part): yield e + case BuiltinToolReturnPart() | FilePart(): + # These don't have deltas, so they don't need to be ended. 
+ pass - async def handle_text_start( - self, part: TextPart, previous_part: ModelResponsePart | None = None - ) -> AsyncIterator[EventT]: + async def handle_text_start(self, part: TextPart, follows_text: bool = False) -> AsyncIterator[EventT]: """Handle a TextPart at start. Args: part: The TextPart. - previous_part: The previous part. + follows_text: Whether the part follows a text part. Yields: Protocol-specific events. @@ -290,14 +293,12 @@ async def handle_text_delta(self, delta: TextPartDelta) -> AsyncIterator[EventT] return yield # Make this an async generator - async def handle_thinking_start( - self, part: ThinkingPart, previous_part: ModelResponsePart | None = None - ) -> AsyncIterator[EventT]: + async def handle_thinking_start(self, part: ThinkingPart, follows_thinking: bool = False) -> AsyncIterator[EventT]: """Handle a ThinkingPart at start. Args: part: The ThinkingPart. - previous_part: The previous part. + follows_thinking: Whether the part follows a thinking part. Yields: Protocol-specific events. @@ -377,15 +378,13 @@ async def handle_file(self, part: FilePart) -> AsyncIterator[EventT]: return yield # Make this an async generator - async def handle_text_end( - self, part: TextPart, next_part: ModelResponsePart | None = None - ) -> AsyncIterator[EventT]: + async def handle_text_end(self, part: TextPart, followed_by_text: bool = False) -> AsyncIterator[EventT]: """Handle the end of a TextPart.""" return yield # Make this an async generator async def handle_thinking_end( - self, part: ThinkingPart, next_part: ModelResponsePart | None = None + self, part: ThinkingPart, followed_by_thinking: bool = False ) -> AsyncIterator[EventT]: """Handle the end of a ThinkingPart.""" return diff --git a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_event_stream.py b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_event_stream.py index c0cb11c015..a5840eeda8 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_event_stream.py +++ b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_event_stream.py @@ -1,23 +1,17 @@ """Vercel AI event stream implementation.""" -# pyright: reportIncompatibleMethodOverride=false - from __future__ import annotations from collections.abc import AsyncIterator -from typing import Any +from typing import Any, Final from pydantic_core import to_json from ...messages import ( - BuiltinToolCallEvent, # type: ignore[reportDeprecated] BuiltinToolCallPart, - BuiltinToolResultEvent, # type: ignore[reportDeprecated] BuiltinToolReturnPart, FinalResultEvent, - FunctionToolCallEvent, FunctionToolResultEvent, - ModelResponsePart, RetryPromptPart, TextPart, TextPartDelta, @@ -25,7 +19,6 @@ ThinkingPartDelta, ToolCallPart, ToolCallPartDelta, - ToolReturnPart, ) from ...tools import AgentDepsT from .. 
import BaseEventStream
@@ -35,8 +28,11 @@
     ErrorChunk,
     FinishChunk,
     ReasoningDeltaChunk,
+    ReasoningEndChunk,
     ReasoningStartChunk,
+    StartChunk,
     TextDeltaChunk,
+    TextEndChunk,
     TextStartChunk,
     ToolInputDeltaChunk,
     ToolInputStartChunk,
@@ -51,6 +47,9 @@ def _json_dumps(obj: Any) -> str:
     return to_json(obj).decode('utf-8')
 
 
+BUILTIN_TOOL_CALL_ID_PREFIX: Final[str] = 'pyd_ai_builtin'
+
+
 class VercelAIEventStream(BaseEventStream[RequestData, BaseChunk, AgentDepsT]):
     """TODO (DouweM): Docstring."""
 
@@ -58,108 +57,139 @@ def __init__(self, request: RequestData) -> None:
         """Initialize Vercel AI event stream state."""
         super().__init__(request)
         self._final_result_tool_id: str | None = None
+        self._builtin_tool_call_ids: dict[str, str] = {}
 
     def encode_event(self, event: BaseChunk, accept: str | None = None) -> str:
         return f'data: {event.model_dump_json(by_alias=True, exclude_none=True)}\n\n'
 
+    async def before_stream(self) -> AsyncIterator[BaseChunk]:
+        """Yield events before agent streaming starts."""
+        yield StartChunk()
+
     async def after_stream(self) -> AsyncIterator[BaseChunk]:
         """Yield events after agent streaming completes."""
-        # Close the final result tool if there was one
-        if tool_call_id := self._final_result_tool_id:
-            yield ToolOutputAvailableChunk(tool_call_id=tool_call_id, output=None)
         yield FinishChunk()
 
     async def on_error(self, error: Exception) -> AsyncIterator[BaseChunk]:
         """Handle errors during streaming."""
         yield ErrorChunk(error_text=str(error))
 
-    # Granular handlers implementation
+    async def after_response(self) -> AsyncIterator[BaseChunk]:
+        """Yield events after agent response completes."""
+        # Close the final result tool if there was one
+        if tool_call_id := self._final_result_tool_id:
+            yield ToolOutputAvailableChunk(tool_call_id=tool_call_id, output=None)
 
-    async def handle_text_start(
-        self, part: TextPart, previous_part: ModelResponsePart | None = None
-    ) -> AsyncIterator[BaseChunk]:
+    async def handle_text_start(self, part: TextPart, follows_text: bool = False) -> AsyncIterator[BaseChunk]:
         """Handle a TextPart at start."""
-        yield TextStartChunk(id=self.message_id or self.new_message_id())
+        if follows_text:
+            message_id = self.message_id
+        else:
+            message_id = self.new_message_id()
+        yield TextStartChunk(id=message_id)
+
         if part.content:
-            yield TextDeltaChunk(id=self.message_id, delta=part.content)
+            yield TextDeltaChunk(id=message_id, delta=part.content)
 
     async def handle_text_delta(self, delta: TextPartDelta) -> AsyncIterator[BaseChunk]:
         """Handle a TextPartDelta."""
         if delta.content_delta:
             yield TextDeltaChunk(id=self.message_id, delta=delta.content_delta)
 
+    async def handle_text_end(self, part: TextPart, followed_by_text: bool = False) -> AsyncIterator[BaseChunk]:
+        """Handle a TextPart at end."""
+        if not followed_by_text:
+            yield TextEndChunk(id=self.message_id)
+
     async def handle_thinking_start(
-        self, part: ThinkingPart, previous_part: ModelResponsePart | None = None
+        self, part: ThinkingPart, follows_thinking: bool = False
    ) -> AsyncIterator[BaseChunk]:
         """Handle a ThinkingPart at start."""
-        if not self.message_id:
-            self.new_message_id()
-        yield ReasoningStartChunk(id=self.message_id)
+        message_id = self.new_message_id()
+        yield ReasoningStartChunk(id=message_id)
         if part.content:
-            yield ReasoningDeltaChunk(id=self.message_id, delta=part.content)
+            yield ReasoningDeltaChunk(id=message_id, delta=part.content)
 
     async def handle_thinking_delta(self, delta: ThinkingPartDelta) -> AsyncIterator[BaseChunk]:
         """Handle a ThinkingPartDelta."""
         if delta.content_delta:
             yield ReasoningDeltaChunk(id=self.message_id, delta=delta.content_delta)
 
-    async def handle_tool_call_start(self, part: ToolCallPart | BuiltinToolCallPart) -> AsyncIterator[BaseChunk]:
+    async def handle_thinking_end(
+        self, part: ThinkingPart, followed_by_thinking: bool = False
+    ) -> AsyncIterator[BaseChunk]:
+        """Handle a ThinkingPart at end."""
+        yield ReasoningEndChunk(id=self.message_id)
+
+    def handle_tool_call_start(self, part: ToolCallPart | BuiltinToolCallPart) -> AsyncIterator[BaseChunk]:
         """Handle a ToolCallPart or BuiltinToolCallPart at start."""
-        yield ToolInputStartChunk(tool_call_id=part.tool_call_id, tool_name=part.tool_name)
-        if isinstance(part.args, str):
-            yield ToolInputDeltaChunk(tool_call_id=part.tool_call_id, input_text_delta=part.args)
-        elif part.args is not None:
-            yield ToolInputDeltaChunk(tool_call_id=part.tool_call_id, input_text_delta=_json_dumps(part.args))
+        return self._handle_tool_call_start(part)
 
     def handle_builtin_tool_call_start(self, part: BuiltinToolCallPart) -> AsyncIterator[BaseChunk]:
-        return self.handle_tool_call_start(part)
+        """Handle a BuiltinToolCallPart at start, emitting tool input events."""
+        # TODO (DouweM): Reduce duplication with AGUIEventStream
+        tool_call_id = part.tool_call_id
+        builtin_tool_call_id = '|'.join([BUILTIN_TOOL_CALL_ID_PREFIX, part.provider_name or '', tool_call_id])
+        self._builtin_tool_call_ids[tool_call_id] = builtin_tool_call_id
+        tool_call_id = builtin_tool_call_id
+
+        return self._handle_tool_call_start(part, tool_call_id)
+
+    async def _handle_tool_call_start(
+        self, part: ToolCallPart | BuiltinToolCallPart, tool_call_id: str | None = None
+    ) -> AsyncIterator[BaseChunk]:
+        """Handle a ToolCallPart or BuiltinToolCallPart at start."""
+        tool_call_id = tool_call_id or part.tool_call_id
+        yield ToolInputStartChunk(tool_call_id=tool_call_id, tool_name=part.tool_name)
+        if part.args:
+            yield ToolInputDeltaChunk(tool_call_id=tool_call_id, input_text_delta=part.args_as_json_str())
 
     async def handle_tool_call_delta(self, delta: ToolCallPartDelta) -> AsyncIterator[BaseChunk]:
         """Handle a ToolCallPartDelta."""
         tool_call_id = delta.tool_call_id or ''
-        if isinstance(delta.args_delta, str):
-            yield ToolInputDeltaChunk(tool_call_id=tool_call_id, input_text_delta=delta.args_delta)
-        elif delta.args_delta is not None:
-            yield ToolInputDeltaChunk(tool_call_id=tool_call_id, input_text_delta=_json_dumps(delta.args_delta))
+        assert tool_call_id, '`ToolCallPartDelta.tool_call_id` must be set'
+        if tool_call_id in self._builtin_tool_call_ids:
+            tool_call_id = self._builtin_tool_call_ids[tool_call_id]
+        yield ToolInputDeltaChunk(
+            tool_call_id=tool_call_id,
+            input_text_delta=delta.args_delta if isinstance(delta.args_delta, str) else _json_dumps(delta.args_delta),
+        )
+
+    async def handle_tool_call_end(self, part: ToolCallPart) -> AsyncIterator[BaseChunk]:
+        """Handle a ToolCallPart at end."""
+        # TODO (DouweM): We don't have the full args available here,
+        # and we don't seem to need to send this anyway if we've already sent deltas
+        # yield ToolInputAvailableChunk(
+        #     tool_call_id=part.tool_call_id,
+        #     tool_name=part.tool_name,
+        #     input=part.args,
+        #     provider_executed=True,
+        #     dynamic=False,
+        # )
+        # TODO (DouweM): What are ToolInputAvailableChunk.provider_executed and dynamic?
+        # Likely used for builtin and external tools.
+ return + yield # Make this an async generator + + # async def handle_builtin_tool_call_end(self, part: BuiltinToolCallPart) -> AsyncIterator[BaseChunk]: + # """Handle a BuiltinToolCallPart at end.""" + # pass async def handle_builtin_tool_return(self, part: BuiltinToolReturnPart) -> AsyncIterator[BaseChunk]: """Handle a BuiltinToolReturnPart.""" yield ToolOutputAvailableChunk(tool_call_id=part.tool_call_id, output=part.content) - async def handle_function_tool_call(self, event: FunctionToolCallEvent) -> AsyncIterator[BaseChunk]: - """Handle a FunctionToolCallEvent. - - No Vercel AI events are emitted at this stage since tool calls are handled in PartStartEvent. - """ - return - yield # Make this an async generator - async def handle_function_tool_result(self, event: FunctionToolResultEvent) -> AsyncIterator[BaseChunk]: """Handle a FunctionToolResultEvent, emitting tool result events.""" result = event.result - if isinstance(result, ToolReturnPart): - yield ToolOutputAvailableChunk(tool_call_id=result.tool_call_id, output=result.content) - elif isinstance(result, RetryPromptPart): - # For retry prompts, emit the error content as tool output - yield ToolOutputAvailableChunk(tool_call_id=result.tool_call_id, output=result.content) + output = result.model_response() if isinstance(result, RetryPromptPart) else result.content + yield ToolOutputAvailableChunk(tool_call_id=result.tool_call_id, output=output) - async def handle_builtin_tool_call(self, event: BuiltinToolCallEvent) -> AsyncIterator[BaseChunk]: # type: ignore[reportDeprecated] - """Handle a BuiltinToolCallEvent, emitting tool input events.""" - part = event.part - yield ToolInputStartChunk(tool_call_id=part.tool_call_id, tool_name=part.tool_name) - if isinstance(part.args, str): - yield ToolInputDeltaChunk(tool_call_id=part.tool_call_id, input_text_delta=part.args) - elif part.args is not None: - yield ToolInputDeltaChunk(tool_call_id=part.tool_call_id, input_text_delta=_json_dumps(part.args)) - - async def handle_builtin_tool_result(self, event: BuiltinToolResultEvent) -> AsyncIterator[BaseChunk]: # type: ignore[reportDeprecated] - """Handle a BuiltinToolResultEvent, emitting tool output events.""" - result = event.result - yield ToolOutputAvailableChunk(tool_call_id=result.tool_call_id, output=result.content) + # TODO (DouweM): Stream ToolCallResultEvent.content as user parts? async def handle_final_result(self, event: FinalResultEvent) -> AsyncIterator[BaseChunk]: """Handle a FinalResultEvent, tracking the final result tool.""" if event.tool_call_id and event.tool_name: self._final_result_tool_id = event.tool_call_id - # TODO (DouweM): Stream output tool result once it's ready - yield ToolInputStartChunk(tool_call_id=event.tool_call_id, tool_name=event.tool_name) + return + yield # Make this an async generator diff --git a/tests/test_ag_ui.py b/tests/test_ag_ui.py index d7e5bd7573..14eac2e3f4 100644 --- a/tests/test_ag_ui.py +++ b/tests/test_ag_ui.py @@ -639,6 +639,17 @@ async def stream_function( }, {'type': 'TOOL_CALL_ARGS', 'toolCallId': tool_call_id, 'delta': 'Paris"}'}, {'type': 'TOOL_CALL_END', 'toolCallId': tool_call_id}, + { + 'type': 'TOOL_CALL_RESULT', + 'messageId': IsStr(), + 'toolCallId': tool_call_id, + 'content': """\ +Unknown tool name: 'get_weather'. 
Available tools: 'get_weather_parts' + +Fix the errors and try again.\ +""", + 'role': 'tool', + }, {'type': 'TEXT_MESSAGE_START', 'messageId': (message_id := IsSameStr()), 'role': 'assistant'}, { 'type': 'TEXT_MESSAGE_CONTENT', diff --git a/tests/test_vercel_ai.py b/tests/test_vercel_ai.py index 0f61684697..10a333be94 100644 --- a/tests/test_vercel_ai.py +++ b/tests/test_vercel_ai.py @@ -150,7 +150,9 @@ async def test_run(allow_model_requests: None, openai_api_key: str): events = [json.loads(event.removeprefix('data: ')) async for event in adapter.encode_stream(adapter.run_stream())] assert events == snapshot( [ + {'type': 'start'}, {'type': 'reasoning-start', 'id': IsStr()}, + {'type': 'reasoning-end', 'id': IsStr()}, { 'type': 'tool-input-start', 'toolCallId': IsStr(), @@ -167,6 +169,7 @@ async def test_run(allow_model_requests: None, openai_api_key: str): 'output': {'status': 'completed'}, }, {'type': 'reasoning-start', 'id': IsStr()}, + {'type': 'reasoning-end', 'id': IsStr()}, { 'type': 'tool-input-start', 'toolCallId': IsStr(), @@ -183,6 +186,7 @@ async def test_run(allow_model_requests: None, openai_api_key: str): 'output': {'status': 'completed'}, }, {'type': 'reasoning-start', 'id': IsStr()}, + {'type': 'reasoning-end', 'id': IsStr()}, { 'type': 'tool-input-start', 'toolCallId': IsStr(), @@ -199,6 +203,7 @@ async def test_run(allow_model_requests: None, openai_api_key: str): 'output': {'status': 'completed'}, }, {'type': 'reasoning-start', 'id': IsStr()}, + {'type': 'reasoning-end', 'id': IsStr()}, { 'type': 'tool-input-start', 'toolCallId': IsStr(), @@ -215,6 +220,7 @@ async def test_run(allow_model_requests: None, openai_api_key: str): 'output': {'status': 'completed'}, }, {'type': 'reasoning-start', 'id': IsStr()}, + {'type': 'reasoning-end', 'id': IsStr()}, { 'type': 'tool-input-start', 'toolCallId': IsStr(), @@ -231,6 +237,7 @@ async def test_run(allow_model_requests: None, openai_api_key: str): 'output': {'status': 'completed'}, }, {'type': 'reasoning-start', 'id': IsStr()}, + {'type': 'reasoning-end', 'id': IsStr()}, { 'type': 'tool-input-start', 'toolCallId': IsStr(), @@ -247,6 +254,7 @@ async def test_run(allow_model_requests: None, openai_api_key: str): 'output': {'status': 'completed'}, }, {'type': 'reasoning-start', 'id': IsStr()}, + {'type': 'reasoning-end', 'id': IsStr()}, { 'type': 'tool-input-start', 'toolCallId': IsStr(), @@ -263,6 +271,7 @@ async def test_run(allow_model_requests: None, openai_api_key: str): 'output': {'status': 'completed'}, }, {'type': 'reasoning-start', 'id': IsStr()}, + {'type': 'reasoning-end', 'id': IsStr()}, {'type': 'text-start', 'id': IsStr()}, { 'type': 'text-delta', @@ -853,6 +862,7 @@ def client_response\ {'type': 'text-delta', 'delta': ' skip binary content,', 'id': IsStr()}, {'type': 'text-delta', 'delta': ' or accumulate chunked', 'id': IsStr()}, {'type': 'text-delta', 'delta': ' bodies safely?', 'id': IsStr()}, + {'type': 'text-end', 'id': IsStr()}, {'type': 'finish'}, ] )
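
Note on the builtin tool call ID scheme: both `AGUIEventStream` and `VercelAIEventStream` derive the wire-level ID for builtin tool calls by joining `BUILTIN_TOOL_CALL_ID_PREFIX`, the provider name, and the original `tool_call_id` with `|`, and remember the mapping in `_builtin_tool_call_ids` so later deltas and returns can be re-keyed. A minimal standalone sketch of that scheme; `make_builtin_tool_call_id` mirrors the join used in this patch, while `split_builtin_tool_call_id` is a hypothetical frontend-side inverse that is not part of the patch:

    from __future__ import annotations

    from typing import Final

    BUILTIN_TOOL_CALL_ID_PREFIX: Final[str] = 'pyd_ai_builtin'


    def make_builtin_tool_call_id(provider_name: str | None, tool_call_id: str) -> str:
        # Same '|'-join used by both event streams in this patch.
        return '|'.join([BUILTIN_TOOL_CALL_ID_PREFIX, provider_name or '', tool_call_id])


    def split_builtin_tool_call_id(wire_id: str) -> tuple[str | None, str]:
        # Hypothetical inverse: maxsplit=2 keeps any '|' inside the original ID intact.
        prefix, provider, tool_call_id = wire_id.split('|', 2)
        assert prefix == BUILTIN_TOOL_CALL_ID_PREFIX
        return provider or None, tool_call_id


    assert make_builtin_tool_call_id('openai', 'call_123') == 'pyd_ai_builtin|openai|call_123'
    assert split_builtin_tool_call_id('pyd_ai_builtin|openai|call_123') == ('openai', 'call_123')

`VercelAIEventStream.encode_event` frames each chunk as a server-sent event, `data: <json>` followed by a blank line, which is why the tests decode with `json.loads(event.removeprefix('data: '))`. A client-side sketch under that same framing assumption:

    import json


    def parse_sse_frame(frame: str) -> dict:
        # Each frame is 'data: <json>\n\n'; strip the prefix and the trailing blank line.
        return json.loads(frame.removeprefix('data: ').strip())


    assert parse_sse_frame('data: {"type": "finish"}\n\n') == {'type': 'finish'}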