|
12 | 12 | from typing_extensions import Self, TypeIs, TypeVar |
13 | 13 |
|
14 | 14 | from pydantic_graph import End |
15 | | -from pydantic_graph._utils import get_event_loop |
16 | 15 |
|
17 | 16 | from .. import ( |
18 | 17 | _agent_graph, |
@@ -335,7 +334,7 @@ def run_sync( |
335 | 334 | if infer_name and self.name is None: |
336 | 335 | self._infer_name(inspect.currentframe()) |
337 | 336 |
|
338 | | - return get_event_loop().run_until_complete( |
| 337 | + return _utils.get_event_loop().run_until_complete( |
339 | 338 | self.run( |
340 | 339 | user_prompt, |
341 | 340 | output_type=output_type, |
@@ -581,6 +580,133 @@ async def on_complete() -> None: |
581 | 580 | if not yielded: |
582 | 581 | raise exceptions.AgentRunError('Agent run finished without producing a final result') # pragma: no cover |
583 | 582 |
|
| 583 | + @overload |
| 584 | + def run_stream_sync( |
| 585 | + self, |
| 586 | + user_prompt: str | Sequence[_messages.UserContent] | None = None, |
| 587 | + *, |
| 588 | + output_type: None = None, |
| 589 | + message_history: Sequence[_messages.ModelMessage] | None = None, |
| 590 | + deferred_tool_results: DeferredToolResults | None = None, |
| 591 | + model: models.Model | models.KnownModelName | str | None = None, |
| 592 | + deps: AgentDepsT = None, |
| 593 | + model_settings: ModelSettings | None = None, |
| 594 | + usage_limits: _usage.UsageLimits | None = None, |
| 595 | + usage: _usage.RunUsage | None = None, |
| 596 | + infer_name: bool = True, |
| 597 | + toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None, |
| 598 | + builtin_tools: Sequence[AbstractBuiltinTool] | None = None, |
| 599 | + event_stream_handler: EventStreamHandler[AgentDepsT] | None = None, |
| 600 | + ) -> result.StreamedRunResultSync[AgentDepsT, OutputDataT]: ... |
| 601 | + |
| 602 | + @overload |
| 603 | + def run_stream_sync( |
| 604 | + self, |
| 605 | + user_prompt: str | Sequence[_messages.UserContent] | None = None, |
| 606 | + *, |
| 607 | + output_type: OutputSpec[RunOutputDataT], |
| 608 | + message_history: Sequence[_messages.ModelMessage] | None = None, |
| 609 | + deferred_tool_results: DeferredToolResults | None = None, |
| 610 | + model: models.Model | models.KnownModelName | str | None = None, |
| 611 | + deps: AgentDepsT = None, |
| 612 | + model_settings: ModelSettings | None = None, |
| 613 | + usage_limits: _usage.UsageLimits | None = None, |
| 614 | + usage: _usage.RunUsage | None = None, |
| 615 | + infer_name: bool = True, |
| 616 | + toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None, |
| 617 | + builtin_tools: Sequence[AbstractBuiltinTool] | None = None, |
| 618 | + event_stream_handler: EventStreamHandler[AgentDepsT] | None = None, |
| 619 | + ) -> result.StreamedRunResultSync[AgentDepsT, RunOutputDataT]: ... |
| 620 | + |
| 621 | + def run_stream_sync( |
| 622 | + self, |
| 623 | + user_prompt: str | Sequence[_messages.UserContent] | None = None, |
| 624 | + *, |
| 625 | + output_type: OutputSpec[RunOutputDataT] | None = None, |
| 626 | + message_history: Sequence[_messages.ModelMessage] | None = None, |
| 627 | + deferred_tool_results: DeferredToolResults | None = None, |
| 628 | + model: models.Model | models.KnownModelName | str | None = None, |
| 629 | + deps: AgentDepsT = None, |
| 630 | + model_settings: ModelSettings | None = None, |
| 631 | + usage_limits: _usage.UsageLimits | None = None, |
| 632 | + usage: _usage.RunUsage | None = None, |
| 633 | + infer_name: bool = True, |
| 634 | + toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None, |
| 635 | + builtin_tools: Sequence[AbstractBuiltinTool] | None = None, |
| 636 | + event_stream_handler: EventStreamHandler[AgentDepsT] | None = None, |
| 637 | + ) -> result.StreamedRunResultSync[AgentDepsT, Any]: |
| 638 | + """Run the agent with a user prompt in sync streaming mode. |
| 639 | +
|
| 640 | + This is a convenience method that wraps [`run_stream()`][pydantic_ai.agent.AbstractAgent.run_stream] with `loop.run_until_complete(...)`. |
| 641 | + You therefore can't use this method inside async code or if there's an active event loop. |
| 642 | +
|
| 643 | + This method builds an internal agent graph (using system prompts, tools and output schemas) and then |
| 644 | + runs the graph until the model produces output matching the `output_type`, for example text or structured data. |
| 645 | + At this point, a streaming run result object is returned, from which you can stream the output as it comes in,
| 646 | + and -- once this output has completed streaming -- get the complete output, message history, and usage. |
| 647 | +
|
| 648 | + As this method will consider the first output matching the `output_type` to be the final output, |
| 649 | + it will stop running the agent graph and will not execute any tool calls made by the model after this "final" output. |
| 650 | + If you want to always run the agent graph to completion and stream events and output at the same time, |
| 651 | + use [`agent.run()`][pydantic_ai.agent.AbstractAgent.run] with an `event_stream_handler` or [`agent.iter()`][pydantic_ai.agent.AbstractAgent.iter] instead. |
| 652 | +
|
| 653 | + Example: |
| 654 | + ```python |
| 655 | + from pydantic_ai import Agent |
| 656 | +
|
| 657 | + agent = Agent('openai:gpt-4o') |
| 658 | +
|
| 659 | + def main(): |
| 660 | + response = agent.run_stream_sync('What is the capital of the UK?') |
| 661 | + print(response.get_output()) |
| 662 | + #> The capital of the UK is London. |
| 663 | + ``` |
| 664 | +
|
| 665 | + Args: |
| 666 | + user_prompt: User input to start/continue the conversation. |
| 667 | + output_type: Custom output type to use for this run; `output_type` may only be used if the agent has no
| 668 | + output validators, since output validators would expect an argument that matches the agent's output type.
| 669 | + message_history: History of the conversation so far. |
| 670 | + deferred_tool_results: Optional results for deferred tool calls in the message history. |
| 671 | + model: Optional model to use for this run, required if `model` was not set when creating the agent. |
| 672 | + deps: Optional dependencies to use for this run. |
| 673 | + model_settings: Optional settings to use for this model's request. |
| 674 | + usage_limits: Optional limits on model request count or token usage. |
| 675 | + usage: Optional usage to start with, useful for resuming a conversation or agents used in tools. |
| 676 | + infer_name: Whether to try to infer the agent name from the call frame if it's not set. |
| 677 | + toolsets: Optional additional toolsets for this run. |
| 678 | + builtin_tools: Optional additional builtin tools for this run. |
| 679 | + event_stream_handler: Optional handler for events from the model's streaming response and the agent's execution of tools to use for this run. |
| 680 | + It will receive all the events up until the final result is found, which you can then read or stream from the returned streamed run result.
| 681 | + Note that it does _not_ receive any events after the final result is found. |
| 682 | +
|
| 683 | + Returns: |
| 684 | + The result of the run. |
| 685 | + """ |
| 686 | + if infer_name and self.name is None: |
| 687 | + self._infer_name(inspect.currentframe()) |
| 688 | + |
| 689 | + async def _consume_stream(): |
| 690 | + async with self.run_stream( |
| 691 | + user_prompt, |
| 692 | + output_type=output_type, |
| 693 | + message_history=message_history, |
| 694 | + deferred_tool_results=deferred_tool_results, |
| 695 | + model=model, |
| 696 | + deps=deps, |
| 697 | + model_settings=model_settings, |
| 698 | + usage_limits=usage_limits, |
| 699 | + usage=usage, |
| 700 | + infer_name=infer_name, |
| 701 | + toolsets=toolsets, |
| 702 | + builtin_tools=builtin_tools, |
| 703 | + event_stream_handler=event_stream_handler, |
| 704 | + ) as stream_result: |
| 705 | + yield stream_result |
| 706 | + |
| 707 | + async_result = _utils.get_event_loop().run_until_complete(anext(_consume_stream())) |
| 708 | + return result.StreamedRunResultSync(async_result) |
| 709 | + |
584 | 710 | @overload |
585 | 711 | def run_stream_events( |
586 | 712 | self, |
@@ -1217,6 +1343,6 @@ def to_cli_sync( |
1217 | 1343 | agent.to_cli_sync(prog_name='assistant') |
1218 | 1344 | ``` |
1219 | 1345 | """ |
1220 | | - return get_event_loop().run_until_complete( |
| 1346 | + return _utils.get_event_loop().run_until_complete( |
1221 | 1347 | self.to_cli(deps=deps, prog_name=prog_name, message_history=message_history) |
1222 | 1348 | ) |
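
For reference, a minimal usage sketch of the new `run_stream_sync()` method with a custom `output_type`, based on the overloads and docstring example added above. The model name and the `City` type are placeholders, and `get_output()` is the accessor shown in the docstring example; this is illustrative only, not part of the diff.

```python
from pydantic import BaseModel

from pydantic_ai import Agent


class City(BaseModel):
    """Hypothetical structured output type, used only for illustration."""

    name: str
    country: str


agent = Agent('openai:gpt-4o')  # placeholder model name

# Per the second overload above, passing `output_type` narrows the return
# type to `StreamedRunResultSync[AgentDepsT, City]`. Because the method wraps
# `run_stream()` with `run_until_complete()`, it must be called from
# synchronous code with no event loop already running.
response = agent.run_stream_sync('What is the capital of the UK?', output_type=City)
print(response.get_output())
```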