diff --git a/Makefile b/Makefile
index 7dd9bbdf..39899d81 100644
--- a/Makefile
+++ b/Makefile
@@ -18,6 +18,14 @@ mypy:
 tests:
 	uv run pytest
 
+.PHONY: snapshots-fix
+snapshots-fix:
+	uv run pytest --inline-snapshot=fix
+
+.PHONY: snapshots-create
+snapshots-create:
+	uv run pytest --inline-snapshot=create
+
 .PHONY: old_version_tests
 old_version_tests:
 	UV_PROJECT_ENVIRONMENT=.venv_39 uv run --python 3.9 -m pytest
diff --git a/examples/basic/hello_world_jupyter.py b/examples/basic/hello_world_jupyter.py
index bb8f14c0..c929a7c6 100644
--- a/examples/basic/hello_world_jupyter.py
+++ b/examples/basic/hello_world_jupyter.py
@@ -3,7 +3,7 @@
 agent = Agent(name="Assistant", instructions="You are a helpful assistant")
 
 # Intended for Jupyter notebooks where there's an existing event loop
-result = await Runner.run(agent, "Write a haiku about recursion in programming.") # type: ignore[top-level-await] # noqa: F704
+result = await Runner.run(agent, "Write a haiku about recursion in programming.")  # type: ignore[top-level-await]  # noqa: F704
 print(result.final_output)
 
 # Code within code loops,
diff --git a/pyproject.toml b/pyproject.toml
index 8184a670..3ad1d37c 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -47,6 +47,7 @@ dev = [
     "mkdocstrings[python]>=0.28.0",
     "coverage>=7.6.12",
     "playwright==1.50.0",
+    "inline-snapshot>=0.20.7",
 ]
 [tool.uv.workspace]
 members = ["agents"]
@@ -116,4 +117,7 @@ filterwarnings = [
 ]
 markers = [
     "allow_call_model_methods: mark test as allowing calls to real model implementations",
-]
\ No newline at end of file
+]
+
+[tool.inline-snapshot]
+format-command="ruff format --stdin-filename {filename}"
\ No newline at end of file
diff --git a/src/agents/_run_impl.py b/src/agents/_run_impl.py
index 2c849506..c0c0ebd0 100644
--- a/src/agents/_run_impl.py
+++ b/src/agents/_run_impl.py
@@ -25,7 +25,6 @@
 from openai.types.responses.response_input_param import ComputerCallOutput
 from openai.types.responses.response_reasoning_item import ResponseReasoningItem
 
-from . import _utils
 from .agent import Agent
 from .agent_output import AgentOutputSchema
 from .computer import AsyncComputer, Computer
@@ -59,6 +58,7 @@
     handoff_span,
     trace,
 )
+from .util import _coro, _error_tracing
 
 if TYPE_CHECKING:
     from .run import RunConfig
@@ -293,7 +293,7 @@ def process_model_response(
             elif isinstance(output, ResponseComputerToolCall):
                 items.append(ToolCallItem(raw_item=output, agent=agent))
                 if not computer_tool:
-                    _utils.attach_error_to_current_span(
+                    _error_tracing.attach_error_to_current_span(
                         SpanError(
                             message="Computer tool not found",
                             data={},
@@ -324,7 +324,7 @@
             # Regular function tool call
             else:
                 if output.name not in function_map:
-                    _utils.attach_error_to_current_span(
+                    _error_tracing.attach_error_to_current_span(
                         SpanError(
                             message="Tool not found",
                             data={"tool_name": output.name},
@@ -368,7 +368,7 @@ async def run_single_tool(
                 (
                     agent.hooks.on_tool_start(context_wrapper, agent, func_tool)
                     if agent.hooks
-                    else _utils.noop_coroutine()
+                    else _coro.noop_coroutine()
                 ),
                 func_tool.on_invoke_tool(context_wrapper, tool_call.arguments),
             )
@@ -378,11 +378,11 @@
                 (
                     agent.hooks.on_tool_end(context_wrapper, agent, func_tool, result)
                     if agent.hooks
-                    else _utils.noop_coroutine()
+                    else _coro.noop_coroutine()
                 ),
             )
         except Exception as e:
-            _utils.attach_error_to_current_span(
+            _error_tracing.attach_error_to_current_span(
                 SpanError(
                     message="Error running tool",
                     data={"tool_name": func_tool.name, "error": str(e)},
@@ -502,7 +502,7 @@ async def execute_handoffs(
                     source=agent,
                 )
                 if agent.hooks
-                else _utils.noop_coroutine()
+                else _coro.noop_coroutine()
             ),
         )
 
@@ -520,7 +520,7 @@
             new_items=tuple(new_step_items),
         )
         if not callable(input_filter):
-            _utils.attach_error_to_span(
+            _error_tracing.attach_error_to_span(
                 span_handoff,
                 SpanError(
                     message="Invalid input filter",
@@ -530,7 +530,7 @@
             raise UserError(f"Invalid input filter: {input_filter}")
         filtered = input_filter(handoff_input_data)
         if not isinstance(filtered, HandoffInputData):
-            _utils.attach_error_to_span(
+            _error_tracing.attach_error_to_span(
                 span_handoff,
                 SpanError(
                     message="Invalid input filter result",
@@ -591,7 +591,7 @@ async def run_final_output_hooks(
             hooks.on_agent_end(context_wrapper, agent, final_output),
             agent.hooks.on_end(context_wrapper, agent, final_output)
             if agent.hooks
-            else _utils.noop_coroutine(),
+            else _coro.noop_coroutine(),
         )
 
     @classmethod
@@ -706,7 +706,7 @@ async def execute(
             (
                 agent.hooks.on_tool_start(context_wrapper, agent, action.computer_tool)
                 if agent.hooks
-                else _utils.noop_coroutine()
+                else _coro.noop_coroutine()
             ),
             output_func,
         )
@@ -716,7 +716,7 @@
             (
                 agent.hooks.on_tool_end(context_wrapper, agent, action.computer_tool, output)
                 if agent.hooks
-                else _utils.noop_coroutine()
+                else _coro.noop_coroutine()
             ),
         )
diff --git a/src/agents/_utils.py b/src/agents/_utils.py
deleted file mode 100644
index 2a0293a6..00000000
--- a/src/agents/_utils.py
+++ /dev/null
@@ -1,61 +0,0 @@
-from __future__ import annotations
-
-import re
-from collections.abc import Awaitable
-from typing import Any, Literal, Union
-
-from pydantic import TypeAdapter, ValidationError
-from typing_extensions import TypeVar
-
-from .exceptions import ModelBehaviorError
-from .logger import logger
-from .tracing import Span, SpanError, get_current_span
-
-T = TypeVar("T")
-
-MaybeAwaitable = Union[Awaitable[T], T]
-
-
-def transform_string_function_style(name: str) -> str:
-    # Replace spaces with underscores
-    name = name.replace(" ", "_")
-
-    # Replace non-alphanumeric characters with underscores
-    name = re.sub(r"[^a-zA-Z0-9]", "_", name)
-
-    return name.lower()
-
-
-def validate_json(json_str: str, type_adapter: TypeAdapter[T], partial: bool) -> T:
-    partial_setting: bool | Literal["off", "on", "trailing-strings"] = (
-        "trailing-strings" if partial else False
-    )
-    try:
-        validated = type_adapter.validate_json(json_str, experimental_allow_partial=partial_setting)
-        return validated
-    except ValidationError as e:
-        attach_error_to_current_span(
-            SpanError(
-                message="Invalid JSON provided",
-                data={},
-            )
-        )
-        raise ModelBehaviorError(
-            f"Invalid JSON when parsing {json_str} for {type_adapter}; {e}"
-        ) from e
-
-
-def attach_error_to_span(span: Span[Any], error: SpanError) -> None:
-    span.set_error(error)
-
-
-def attach_error_to_current_span(error: SpanError) -> None:
-    span = get_current_span()
-    if span:
-        attach_error_to_span(span, error)
-    else:
-        logger.warning(f"No span to add error {error} to")
-
-
-async def noop_coroutine() -> None:
-    pass
diff --git a/src/agents/agent.py b/src/agents/agent.py
index 61c0a896..84d0ae96 100644
--- a/src/agents/agent.py
+++ b/src/agents/agent.py
@@ -6,8 +6,6 @@
 from dataclasses import dataclass, field
 from typing import TYPE_CHECKING, Any, Callable, Generic, cast
 
-from . import _utils
-from ._utils import MaybeAwaitable
 from .guardrail import InputGuardrail, OutputGuardrail
 from .handoffs import Handoff
 from .items import ItemHelpers
@@ -16,6 +14,8 @@
 from .models.interface import Model
 from .run_context import RunContextWrapper, TContext
 from .tool import Tool, function_tool
+from .util import _transforms
+from .util._types import MaybeAwaitable
 
 if TYPE_CHECKING:
     from .lifecycle import AgentHooks
@@ -126,7 +126,7 @@ def as_tool(
         """
 
         @function_tool(
-            name_override=tool_name or _utils.transform_string_function_style(self.name),
+            name_override=tool_name or _transforms.transform_string_function_style(self.name),
             description_override=tool_description or "",
         )
         async def run_agent(context: RunContextWrapper, input: str) -> str:
diff --git a/src/agents/agent_output.py b/src/agents/agent_output.py
index 0c28800f..3262c57d 100644
--- a/src/agents/agent_output.py
+++ b/src/agents/agent_output.py
@@ -4,10 +4,10 @@
 from pydantic import BaseModel, TypeAdapter
 from typing_extensions import TypedDict, get_args, get_origin
 
-from . import _utils
 from .exceptions import ModelBehaviorError, UserError
 from .strict_schema import ensure_strict_json_schema
 from .tracing import SpanError
+from .util import _error_tracing, _json
 
 _WRAPPER_DICT_KEY = "response"
 
@@ -87,10 +87,10 @@ def validate_json(self, json_str: str, partial: bool = False) -> Any:
         """Validate a JSON string against the output type. Returns the validated object, or raises
         a `ModelBehaviorError` if the JSON is invalid.
         """
-        validated = _utils.validate_json(json_str, self._type_adapter, partial)
+        validated = _json.validate_json(json_str, self._type_adapter, partial)
         if self._is_wrapped:
             if not isinstance(validated, dict):
-                _utils.attach_error_to_current_span(
+                _error_tracing.attach_error_to_current_span(
                     SpanError(
                         message="Invalid JSON",
                         data={"details": f"Expected a dict, got {type(validated)}"},
@@ -101,7 +101,7 @@
                 )
 
             if _WRAPPER_DICT_KEY not in validated:
-                _utils.attach_error_to_current_span(
+                _error_tracing.attach_error_to_current_span(
                     SpanError(
                         message="Invalid JSON",
                         data={"details": f"Could not find key {_WRAPPER_DICT_KEY} in JSON"},
diff --git a/src/agents/guardrail.py b/src/agents/guardrail.py
index 5bebcd66..a96f0f7d 100644
--- a/src/agents/guardrail.py
+++ b/src/agents/guardrail.py
@@ -7,10 +7,10 @@
 
 from typing_extensions import TypeVar
 
-from ._utils import MaybeAwaitable
 from .exceptions import UserError
 from .items import TResponseInputItem
 from .run_context import RunContextWrapper, TContext
+from .util._types import MaybeAwaitable
 
 if TYPE_CHECKING:
     from .agent import Agent
diff --git a/src/agents/handoffs.py b/src/agents/handoffs.py
index ac157401..686191f3 100644
--- a/src/agents/handoffs.py
+++ b/src/agents/handoffs.py
@@ -8,12 +8,12 @@
 from pydantic import TypeAdapter
 from typing_extensions import TypeAlias, TypeVar
 
-from . import _utils
 from .exceptions import ModelBehaviorError, UserError
 from .items import RunItem, TResponseInputItem
 from .run_context import RunContextWrapper, TContext
 from .strict_schema import ensure_strict_json_schema
 from .tracing.spans import SpanError
+from .util import _error_tracing, _json, _transforms
 
 if TYPE_CHECKING:
     from .agent import Agent
@@ -104,7 +104,7 @@ def get_transfer_message(self, agent: Agent[Any]) -> str:
 
     @classmethod
     def default_tool_name(cls, agent: Agent[Any]) -> str:
-        return _utils.transform_string_function_style(f"transfer_to_{agent.name}")
+        return _transforms.transform_string_function_style(f"transfer_to_{agent.name}")
 
     @classmethod
     def default_tool_description(cls, agent: Agent[Any]) -> str:
@@ -192,7 +192,7 @@
     ) -> Agent[Any]:
         if input_type is not None and type_adapter is not None:
             if input_json is None:
-                _utils.attach_error_to_current_span(
+                _error_tracing.attach_error_to_current_span(
                     SpanError(
                         message="Handoff function expected non-null input, but got None",
                         data={"details": "input_json is None"},
@@ -200,7 +200,7 @@
                 )
                 raise ModelBehaviorError("Handoff function expected non-null input, but got None")
 
-            validated_input = _utils.validate_json(
+            validated_input = _json.validate_json(
                 json_str=input_json,
                 type_adapter=type_adapter,
                 partial=False,
diff --git a/src/agents/result.py b/src/agents/result.py
index 6e806b72..40a64806 100644
--- a/src/agents/result.py
+++ b/src/agents/result.py
@@ -17,6 +17,7 @@
 from .logger import logger
 from .stream_events import StreamEvent
 from .tracing import Trace
+from .util._pretty_print import pretty_print_result, pretty_print_run_result_streaming
 
 if TYPE_CHECKING:
     from ._run_impl import QueueCompleteSentinel
@@ -89,6 +90,9 @@ def last_agent(self) -> Agent[Any]:
         """The last agent that was run."""
         return self._last_agent
 
+    def __str__(self) -> str:
+        return pretty_print_result(self)
+
 
 @dataclass
 class RunResultStreaming(RunResultBase):
@@ -216,3 +220,6 @@ def _cleanup_tasks(self):
 
         if self._output_guardrails_task and not self._output_guardrails_task.done():
             self._output_guardrails_task.cancel()
+
+    def __str__(self) -> str:
+        return pretty_print_run_result_streaming(self)
diff --git a/src/agents/run.py b/src/agents/run.py
index dfff7e38..934400fe 100644
--- a/src/agents/run.py
+++ b/src/agents/run.py
@@ -7,7 +7,6 @@
 
 from openai.types.responses import ResponseCompletedEvent
 
-from . import Model, _utils
 from ._run_impl import (
     NextStepFinalOutput,
     NextStepHandoff,
@@ -33,7 +32,7 @@
 from .lifecycle import RunHooks
 from .logger import logger
 from .model_settings import ModelSettings
-from .models.interface import ModelProvider
+from .models.interface import Model, ModelProvider
 from .models.openai_provider import OpenAIProvider
 from .result import RunResult, RunResultStreaming
 from .run_context import RunContextWrapper, TContext
@@ -41,6 +40,7 @@
 from .tracing import Span, SpanError, agent_span, get_current_trace, trace
 from .tracing.span_data import AgentSpanData
 from .usage import Usage
+from .util import _coro, _error_tracing
 
 DEFAULT_MAX_TURNS = 10
 
@@ -193,7 +193,7 @@ async def run(
 
             current_turn += 1
             if current_turn > max_turns:
-                _utils.attach_error_to_span(
+                _error_tracing.attach_error_to_span(
                     current_span,
                     SpanError(
                         message="Max turns exceeded",
@@ -447,7 +447,7 @@ async def _run_input_guardrails_with_queue(
         for done in asyncio.as_completed(guardrail_tasks):
             result = await done
             if result.output.tripwire_triggered:
-                _utils.attach_error_to_span(
+                _error_tracing.attach_error_to_span(
                     parent_span,
                     SpanError(
                         message="Guardrail tripwire triggered",
@@ -511,7 +511,7 @@ async def _run_streamed_impl(
                 streamed_result.current_turn = current_turn
 
                 if current_turn > max_turns:
-                    _utils.attach_error_to_span(
+                    _error_tracing.attach_error_to_span(
                         current_span,
                         SpanError(
                             message="Max turns exceeded",
@@ -583,7 +583,7 @@
                 pass
             except Exception as e:
                 if current_span:
-                    _utils.attach_error_to_span(
+                    _error_tracing.attach_error_to_span(
                         current_span,
                         SpanError(
                             message="Error in agent run",
@@ -615,7 +615,7 @@ async def _run_single_turn_streamed(
             (
                 agent.hooks.on_start(context_wrapper, agent)
                 if agent.hooks
-                else _utils.noop_coroutine()
+                else _coro.noop_coroutine()
             ),
         )
 
@@ -705,7 +705,7 @@ async def _run_single_turn(
             (
                 agent.hooks.on_start(context_wrapper, agent)
                 if agent.hooks
-                else _utils.noop_coroutine()
+                else _coro.noop_coroutine()
             ),
         )
 
@@ -796,7 +796,7 @@ async def _run_input_guardrails(
                 # Cancel all guardrail tasks if a tripwire is triggered.
                 for t in guardrail_tasks:
                     t.cancel()
-                _utils.attach_error_to_current_span(
+                _error_tracing.attach_error_to_current_span(
                     SpanError(
                         message="Guardrail tripwire triggered",
                         data={"guardrail": result.guardrail.get_name()},
@@ -834,7 +834,7 @@ async def _run_output_guardrails(
                 # Cancel all guardrail tasks if a tripwire is triggered.
                 for t in guardrail_tasks:
                     t.cancel()
-                _utils.attach_error_to_current_span(
+                _error_tracing.attach_error_to_current_span(
                     SpanError(
                         message="Guardrail tripwire triggered",
                         data={"guardrail": result.guardrail.get_name()},
diff --git a/src/agents/tool.py b/src/agents/tool.py
index cbe87944..0baf2c0c 100644
--- a/src/agents/tool.py
+++ b/src/agents/tool.py
@@ -11,14 +11,15 @@
 from pydantic import ValidationError
 from typing_extensions import Concatenate, ParamSpec
 
-from . import _debug, _utils
-from ._utils import MaybeAwaitable
+from . import _debug
 from .computer import AsyncComputer, Computer
 from .exceptions import ModelBehaviorError
 from .function_schema import DocstringStyle, function_schema
 from .logger import logger
 from .run_context import RunContextWrapper
 from .tracing import SpanError
+from .util import _error_tracing
+from .util._types import MaybeAwaitable
 
 ToolParams = ParamSpec("ToolParams")
 
@@ -263,7 +264,7 @@ async def _on_invoke_tool(ctx: RunContextWrapper[Any], input: str) -> str:
             if inspect.isawaitable(result):
                 return await result
 
-            _utils.attach_error_to_current_span(
+            _error_tracing.attach_error_to_current_span(
                 SpanError(
                     message="Error running tool (non-fatal)",
                     data={
diff --git a/src/agents/util/__init__.py b/src/agents/util/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/src/agents/util/_coro.py b/src/agents/util/_coro.py
new file mode 100644
index 00000000..647ab86a
--- /dev/null
+++ b/src/agents/util/_coro.py
@@ -0,0 +1,2 @@
+async def noop_coroutine() -> None:
+    pass
diff --git a/src/agents/util/_error_tracing.py b/src/agents/util/_error_tracing.py
new file mode 100644
index 00000000..09dbb1de
--- /dev/null
+++ b/src/agents/util/_error_tracing.py
@@ -0,0 +1,16 @@
+from typing import Any
+
+from ..logger import logger
+from ..tracing import Span, SpanError, get_current_span
+
+
+def attach_error_to_span(span: Span[Any], error: SpanError) -> None:
+    span.set_error(error)
+
+
+def attach_error_to_current_span(error: SpanError) -> None:
+    span = get_current_span()
+    if span:
+        attach_error_to_span(span, error)
+    else:
+        logger.warning(f"No span to add error {error} to")
diff --git a/src/agents/util/_json.py b/src/agents/util/_json.py
new file mode 100644
index 00000000..1e081f68
--- /dev/null
+++ b/src/agents/util/_json.py
@@ -0,0 +1,31 @@
+from __future__ import annotations
+
+from typing import Literal
+
+from pydantic import TypeAdapter, ValidationError
+from typing_extensions import TypeVar
+
+from ..exceptions import ModelBehaviorError
+from ..tracing import SpanError
+from ._error_tracing import attach_error_to_current_span
+
+T = TypeVar("T")
+
+
+def validate_json(json_str: str, type_adapter: TypeAdapter[T], partial: bool) -> T:
+    partial_setting: bool | Literal["off", "on", "trailing-strings"] = (
+        "trailing-strings" if partial else False
+    )
+    try:
+        validated = type_adapter.validate_json(json_str, experimental_allow_partial=partial_setting)
+        return validated
+    except ValidationError as e:
+        attach_error_to_current_span(
+            SpanError(
+                message="Invalid JSON provided",
+                data={},
+            )
+        )
+        raise ModelBehaviorError(
+            f"Invalid JSON when parsing {json_str} for {type_adapter}; {e}"
+        ) from e
diff --git a/src/agents/util/_pretty_print.py b/src/agents/util/_pretty_print.py
new file mode 100644
index 00000000..afd3e2b1
--- /dev/null
+++ b/src/agents/util/_pretty_print.py
@@ -0,0 +1,56 @@
+from typing import TYPE_CHECKING
+
+from pydantic import BaseModel
+
+if TYPE_CHECKING:
+    from ..result import RunResult, RunResultBase, RunResultStreaming
+
+
+def _indent(text: str, indent_level: int) -> str:
+    indent_string = " " * indent_level
+    return "\n".join(f"{indent_string}{line}" for line in text.splitlines())
+
+
+def _final_output_str(result: "RunResultBase") -> str:
+    if result.final_output is None:
+        return "None"
+    elif isinstance(result.final_output, str):
+        return result.final_output
+    elif isinstance(result.final_output, BaseModel):
+        return result.final_output.model_dump_json(indent=2)
+    else:
+        return str(result.final_output)
+
+
+def pretty_print_result(result: "RunResult") -> str:
+    output = "RunResult:"
+    output += f'\n- Last agent: Agent(name="{result.last_agent.name}", ...)'
+    output += (
+        f"\n- Final output ({type(result.final_output).__name__}):\n"
+        f"{_indent(_final_output_str(result), 2)}"
+    )
+    output += f"\n- {len(result.new_items)} new item(s)"
+    output += f"\n- {len(result.raw_responses)} raw response(s)"
+    output += f"\n- {len(result.input_guardrail_results)} input guardrail result(s)"
+    output += f"\n- {len(result.output_guardrail_results)} output guardrail result(s)"
+    output += "\n(See `RunResult` for more details)"
+
+    return output
+
+
+def pretty_print_run_result_streaming(result: "RunResultStreaming") -> str:
+    output = "RunResultStreaming:"
+    output += f'\n- Current agent: Agent(name="{result.current_agent.name}", ...)'
+    output += f"\n- Current turn: {result.current_turn}"
+    output += f"\n- Max turns: {result.max_turns}"
+    output += f"\n- Is complete: {result.is_complete}"
+    output += (
+        f"\n- Final output ({type(result.final_output).__name__}):\n"
+        f"{_indent(_final_output_str(result), 2)}"
+    )
+    output += f"\n- {len(result.new_items)} new item(s)"
+    output += f"\n- {len(result.raw_responses)} raw response(s)"
+    output += f"\n- {len(result.input_guardrail_results)} input guardrail result(s)"
+    output += f"\n- {len(result.output_guardrail_results)} output guardrail result(s)"
+    output += "\n(See `RunResultStreaming` for more details)"
+    return output
diff --git a/src/agents/util/_transforms.py b/src/agents/util/_transforms.py
new file mode 100644
index 00000000..b303074d
--- /dev/null
+++ b/src/agents/util/_transforms.py
@@ -0,0 +1,11 @@
+import re
+
+
+def transform_string_function_style(name: str) -> str:
+    # Replace spaces with underscores
+    name = name.replace(" ", "_")
+
+    # Replace non-alphanumeric characters with underscores
+    name = re.sub(r"[^a-zA-Z0-9]", "_", name)
+
+    return name.lower()
diff --git a/src/agents/util/_types.py b/src/agents/util/_types.py
new file mode 100644
index 00000000..8571a694
--- /dev/null
+++ b/src/agents/util/_types.py
@@ -0,0 +1,7 @@
+from collections.abc import Awaitable
+from typing import Union
+
+from typing_extensions import TypeVar
+
+T = TypeVar("T")
+MaybeAwaitable = Union[Awaitable[T], T]
diff --git a/tests/README.md b/tests/README.md
new file mode 100644
index 00000000..d68e067e
--- /dev/null
+++ b/tests/README.md
@@ -0,0 +1,37 @@
+# Tests
+
+Before running any tests, make sure you have `uv` installed (and ideally run `make sync` after).
+
+## Running tests
+
+```
+make tests
+```
+
+## Snapshots
+
+We use [inline-snapshots](https://15r10nk.github.io/inline-snapshot/latest/) for some tests. If your code adds new snapshot tests or breaks existing ones, you can fix/create them. After fixing/creating snapshots, run `make tests` again to verify the tests pass.
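+
+As a rough illustration (a hypothetical test, not one from this repo), a snapshot assertion is an ordinary equality check against `snapshot(...)`: running pytest with `--inline-snapshot=create` fills in an empty `snapshot()`, and `--inline-snapshot=fix` rewrites the stored value when the expected output changes.
+
+```python
+from inline_snapshot import snapshot
+
+
+def test_greeting():
+    # The snapshot value below is what --inline-snapshot=create would write;
+    # --inline-snapshot=fix updates it if the asserted value ever changes.
+    assert f"Hello, {'world'}!" == snapshot("Hello, world!")
+```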
+
+### Fixing snapshots
+
+```
+make snapshots-fix
+```
+
+### Creating snapshots
+
+```
+make snapshots-create
+```
diff --git a/tests/test_function_tool_decorator.py b/tests/test_function_tool_decorator.py
index b5816606..f146ec7f 100644
--- a/tests/test_function_tool_decorator.py
+++ b/tests/test_function_tool_decorator.py
@@ -175,12 +175,11 @@ def multiple_optional_params_function(
     return f"{x}_{y}_{z}"
 
-
 @pytest.mark.asyncio
 async def test_multiple_optional_params_function():
     tool = multiple_optional_params_function
 
-    input_data: dict[str,Any] = {}
+    input_data: dict[str, Any] = {}
 
     output = await tool.on_invoke_tool(ctx_wrapper(), json.dumps(input_data))
     assert output == "42_hello_no_z"
 
diff --git a/tests/test_output_tool.py b/tests/test_output_tool.py
index 31ac984d..86c4b3b5 100644
--- a/tests/test_output_tool.py
+++ b/tests/test_output_tool.py
@@ -4,8 +4,9 @@
 from pydantic import BaseModel
 from typing_extensions import TypedDict
 
-from agents import Agent, AgentOutputSchema, ModelBehaviorError, Runner, UserError, _utils
+from agents import Agent, AgentOutputSchema, ModelBehaviorError, Runner, UserError
 from agents.agent_output import _WRAPPER_DICT_KEY
+from agents.util import _json
 
 
 def test_plain_text_output():
@@ -77,7 +78,7 @@ def test_bad_json_raises_error(mocker):
     output_schema = Runner._get_output_schema(agent)
     assert output_schema, "Should have an output tool config with a structured output type"
 
-    mock_validate_json = mocker.patch.object(_utils, "validate_json")
+    mock_validate_json = mocker.patch.object(_json, "validate_json")
     mock_validate_json.return_value = ["foo"]
 
     with pytest.raises(ModelBehaviorError):
diff --git a/tests/test_pretty_print.py b/tests/test_pretty_print.py
new file mode 100644
index 00000000..b2218a27
--- /dev/null
+++ b/tests/test_pretty_print.py
@@ -0,0 +1,201 @@
+import json
+
+import pytest
+from inline_snapshot import snapshot
+from pydantic import BaseModel
+
+from agents import Agent, Runner
+from agents.agent_output import _WRAPPER_DICT_KEY
+from agents.util._pretty_print import pretty_print_result, pretty_print_run_result_streaming
+from tests.fake_model import FakeModel
+
+from .test_responses import get_final_output_message, get_text_message
+
+
+@pytest.mark.asyncio
+async def test_pretty_result():
+    model = FakeModel()
+    model.set_next_output([get_text_message("Hi there")])
+
+    agent = Agent(name="test_agent", model=model)
+    result = await Runner.run(agent, input="Hello")
+
+    assert pretty_print_result(result) == snapshot("""\
+RunResult:
+- Last agent: Agent(name="test_agent", ...)
+- Final output (str):
+  Hi there
+- 1 new item(s)
+- 1 raw response(s)
+- 0 input guardrail result(s)
+- 0 output guardrail result(s)
+(See `RunResult` for more details)\
+""")
+
+
+@pytest.mark.asyncio
+async def test_pretty_run_result_streaming():
+    model = FakeModel()
+    model.set_next_output([get_text_message("Hi there")])
+
+    agent = Agent(name="test_agent", model=model)
+    result = Runner.run_streamed(agent, input="Hello")
+    async for _ in result.stream_events():
+        pass
+
+    assert pretty_print_run_result_streaming(result) == snapshot("""\
+RunResultStreaming:
+- Current agent: Agent(name="test_agent", ...)
+- Current turn: 1
+- Max turns: 10
+- Is complete: True
+- Final output (str):
+  Hi there
+- 1 new item(s)
+- 1 raw response(s)
+- 0 input guardrail result(s)
+- 0 output guardrail result(s)
+(See `RunResultStreaming` for more details)\
+""")
+
+
+class Foo(BaseModel):
+    bar: str
+
+
+@pytest.mark.asyncio
+async def test_pretty_run_result_structured_output():
+    model = FakeModel()
+    model.set_next_output(
+        [
+            get_text_message("Test"),
+            get_final_output_message(Foo(bar="Hi there").model_dump_json()),
+        ]
+    )
+
+    agent = Agent(name="test_agent", model=model, output_type=Foo)
+    result = await Runner.run(agent, input="Hello")
+
+    assert pretty_print_result(result) == snapshot("""\
+RunResult:
+- Last agent: Agent(name="test_agent", ...)
+- Final output (Foo):
+  {
+    "bar": "Hi there"
+  }
+- 2 new item(s)
+- 1 raw response(s)
+- 0 input guardrail result(s)
+- 0 output guardrail result(s)
+(See `RunResult` for more details)\
+""")
+
+
+@pytest.mark.asyncio
+async def test_pretty_run_result_streaming_structured_output():
+    model = FakeModel()
+    model.set_next_output(
+        [
+            get_text_message("Test"),
+            get_final_output_message(Foo(bar="Hi there").model_dump_json()),
+        ]
+    )
+
+    agent = Agent(name="test_agent", model=model, output_type=Foo)
+    result = Runner.run_streamed(agent, input="Hello")
+
+    async for _ in result.stream_events():
+        pass
+
+    assert pretty_print_run_result_streaming(result) == snapshot("""\
+RunResultStreaming:
+- Current agent: Agent(name="test_agent", ...)
+- Current turn: 1
+- Max turns: 10
+- Is complete: True
+- Final output (Foo):
+  {
+    "bar": "Hi there"
+  }
+- 2 new item(s)
+- 1 raw response(s)
+- 0 input guardrail result(s)
+- 0 output guardrail result(s)
+(See `RunResultStreaming` for more details)\
+""")
+
+
+@pytest.mark.asyncio
+async def test_pretty_run_result_list_structured_output():
+    model = FakeModel()
+    model.set_next_output(
+        [
+            get_text_message("Test"),
+            get_final_output_message(
+                json.dumps(
+                    {
+                        _WRAPPER_DICT_KEY: [
+                            Foo(bar="Hi there").model_dump(),
+                            Foo(bar="Hi there 2").model_dump(),
+                        ]
+                    }
+                )
+            ),
+        ]
+    )
+
+    agent = Agent(name="test_agent", model=model, output_type=list[Foo])
+    result = await Runner.run(agent, input="Hello")
+
+    assert pretty_print_result(result) == snapshot("""\
+RunResult:
+- Last agent: Agent(name="test_agent", ...)
+- Final output (list):
+  [Foo(bar='Hi there'), Foo(bar='Hi there 2')]
+- 2 new item(s)
+- 1 raw response(s)
+- 0 input guardrail result(s)
+- 0 output guardrail result(s)
+(See `RunResult` for more details)\
+""")
+
+
+@pytest.mark.asyncio
+async def test_pretty_run_result_streaming_list_structured_output():
+    model = FakeModel()
+    model.set_next_output(
+        [
+            get_text_message("Test"),
+            get_final_output_message(
+                json.dumps(
+                    {
+                        _WRAPPER_DICT_KEY: [
+                            Foo(bar="Test").model_dump(),
+                            Foo(bar="Test 2").model_dump(),
+                        ]
+                    }
+                )
+            ),
+        ]
+    )
+
+    agent = Agent(name="test_agent", model=model, output_type=list[Foo])
+    result = Runner.run_streamed(agent, input="Hello")
+
+    async for _ in result.stream_events():
+        pass
+
+    assert pretty_print_run_result_streaming(result) == snapshot("""\
+RunResultStreaming:
+- Current agent: Agent(name="test_agent", ...)
+- Current turn: 1
+- Max turns: 10
+- Is complete: True
+- Final output (list):
+  [Foo(bar='Test'), Foo(bar='Test 2')]
+- 2 new item(s)
+- 1 raw response(s)
+- 0 input guardrail result(s)
+- 0 output guardrail result(s)
+(See `RunResultStreaming` for more details)\
+""")
diff --git a/uv.lock b/uv.lock
index c3af99bd..2c2e05b3 100644
--- a/uv.lock
+++ b/uv.lock
@@ -1,4 +1,5 @@
 version = 1
+revision = 1
 requires-python = ">=3.9"
 
 [[package]]
@@ -25,6 +26,15 @@ wheels = [
     { url = "https://files.pythonhosted.org/packages/46/eb/e7f063ad1fec6b3178a3cd82d1a3c4de82cccf283fc42746168188e1cdd5/anyio-4.8.0-py3-none-any.whl", hash = "sha256:b5011f270ab5eb0abf13385f851315585cc37ef330dd88e27ec3d34d651fd47a", size = 96041 },
 ]
 
+[[package]]
+name = "asttokens"
+version = "3.0.0"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/4a/e7/82da0a03e7ba5141f05cce0d302e6eed121ae055e0456ca228bf693984bc/asttokens-3.0.0.tar.gz", hash = "sha256:0dcd8baa8d62b0c1d118b399b2ddba3c4aff271d0d7a9e0d4c1681c79035bbc7", size = 61978 }
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/25/8a/c46dcc25341b5bce5472c718902eb3d38600a903b14fa6aeecef3f21a46f/asttokens-3.0.0-py3-none-any.whl", hash = "sha256:e3078351a059199dd5138cb1c706e6430c05eff2ff136af5eb4790f9d28932e2", size = 26918 },
+]
+
 [[package]]
 name = "babel"
 version = "2.17.0"
@@ -239,6 +249,15 @@ wheels = [
     { url = "https://files.pythonhosted.org/packages/02/cc/b7e31358aac6ed1ef2bb790a9746ac2c69bcb3c8588b41616914eb106eaf/exceptiongroup-1.2.2-py3-none-any.whl", hash = "sha256:3111b9d131c238bec2f8f516e123e14ba243563fb135d3fe885990585aa7795b", size = 16453 },
 ]
 
+[[package]]
+name = "executing"
+version = "2.2.0"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/91/50/a9d80c47ff289c611ff12e63f7c5d13942c65d68125160cefd768c73e6e4/executing-2.2.0.tar.gz", hash = "sha256:5d108c028108fe2551d1a7b2e8b713341e2cb4fc0aa7dcf966fa4327a5226755", size = 978693 }
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/7b/8f/c4d9bafc34ad7ad5d8dc16dd1347ee0e507a52c3adb6bfa8887e1c6a26ba/executing-2.2.0-py2.py3-none-any.whl", hash = "sha256:11387150cad388d62750327a53d3339fad4888b39a6fe233c3afbb54ecffd3aa", size = 26702 },
+]
+
 [[package]]
 name = "ghp-import"
 version = "2.1.0"
@@ -391,6 +410,21 @@ wheels = [
     { url = "https://files.pythonhosted.org/packages/ef/a6/62565a6e1cf69e10f5727360368e451d4b7f58beeac6173dc9db836a5b46/iniconfig-2.0.0-py3-none-any.whl", hash = "sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374", size = 5892 },
 ]
 
+[[package]]
+name = "inline-snapshot"
+version = "0.20.7"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+    { name = "asttokens" },
+    { name = "executing" },
+    { name = "rich" },
+    { name = "tomli", marker = "python_full_version < '3.11'" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/b0/41/9bd2ecd10ef789e8aff6fb68dcc7677dc31b33b2d27c306c0d40fc982fbc/inline_snapshot-0.20.7.tar.gz", hash = "sha256:d55bbb6254d0727dc304729ca7998cde1c1e984c4bf50281514aa9d727a56cf2", size = 92643 }
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/01/8f/1bf23da63ad1a0b14ca2d9114700123ef76732e375548f4f9ca94052817e/inline_snapshot-0.20.7-py3-none-any.whl", hash = "sha256:2df6dd8710d1f0def2c1f9d6c25fd03d7beba01f3addf52fc370343d9ee9959f", size = 48108 },
+]
+
 [[package]]
 name = "jinja2"
 version = "3.1.6"
@@ -796,6 +830,7 @@ dependencies = [
 [package.dev-dependencies]
 dev = [
     { name = "coverage" },
+    { name = "inline-snapshot" },
     { name = "mkdocs" },
     { name = "mkdocs-material" },
     { name = "mkdocstrings", extra = ["python"] },
@@ -821,6 +856,7 @@ requires-dist = [
 [package.metadata.requires-dev]
 dev = [
     { name = "coverage", specifier = ">=7.6.12" },
+    { name = "inline-snapshot", specifier = ">=0.20.7" },
    { name = "mkdocs", specifier = ">=1.6.0" },
     { name = "mkdocs-material", specifier = ">=9.6.0" },
     { name = "mkdocstrings", extras = ["python"], specifier = ">=0.28.0" },