Merged
32 changes: 29 additions & 3 deletions src/strands/agent/agent.py
@@ -57,7 +57,7 @@
from ..tools.watcher import ToolWatcher
from ..types._events import AgentResultEvent, InitEventLoopEvent, ModelStreamChunkEvent, ToolInterruptEvent, TypedEvent
from ..types.agent import AgentInput
from ..types.content import ContentBlock, Message, Messages
from ..types.content import ContentBlock, Message, Messages, SystemContentBlock
from ..types.exceptions import ContextWindowOverflowException
from ..types.interrupt import InterruptResponseContent
from ..types.tools import ToolResult, ToolUse
@@ -216,7 +216,7 @@ def __init__(
model: Union[Model, str, None] = None,
messages: Optional[Messages] = None,
tools: Optional[list[Union[str, dict[str, str], "ToolProvider", Any]]] = None,
system_prompt: Optional[str] = None,
system_prompt: Optional[str | list[SystemContentBlock]] = None,
structured_output_model: Optional[Type[BaseModel]] = None,
callback_handler: Optional[
Union[Callable[..., Any], _DefaultCallbackHandlerSentinel]
@@ -253,6 +253,7 @@ def __init__(

If provided, only these tools will be available. If None, all tools will be available.
system_prompt: System prompt to guide model behavior.
Can be a string or a list of SystemContentBlock objects for advanced features like caching.
If None, the model will behave according to its default settings.
structured_output_model: Pydantic model type(s) for structured output.
When specified, all agent calls will attempt to return structured output of this type.
@@ -287,7 +288,8 @@ def __init__(
"""
self.model = BedrockModel() if not model else BedrockModel(model_id=model) if isinstance(model, str) else model
self.messages = messages if messages is not None else []
self.system_prompt = system_prompt
# Initialize both representations; keep self.system_prompt as a str for backwards compatibility
self.system_prompt, self._system_prompt_content = self._initialize_system_prompt(system_prompt)
self._default_structured_output_model = structured_output_model
self.agent_id = _identifier.validate(agent_id or _DEFAULT_AGENT_ID, _identifier.Identifier.AGENT)
self.name = name or _DEFAULT_AGENT_NAME
@@ -965,6 +967,30 @@ def _filter_tool_parameters_for_recording(self, tool_name: str, input_params: di
properties = tool_spec["inputSchema"]["json"]["properties"]
return {k: v for k, v in input_params.items() if k in properties}

def _initialize_system_prompt(
self, system_prompt: str | list[SystemContentBlock] | None
) -> tuple[str | None, list[SystemContentBlock] | None]:
"""Initialize system prompt fields from constructor input.

Maintains backwards compatibility by keeping system_prompt as a str when a string input is
provided, so existing consumers do not break.

Maps system_prompt input to both string and content block representations:
- If string: system_prompt=string, _system_prompt_content=[{text: string}]
- If list with text elements: system_prompt=concatenated_text, _system_prompt_content=list
- If list without text elements: system_prompt=None, _system_prompt_content=list
- If None: system_prompt=None, _system_prompt_content=None
"""
if isinstance(system_prompt, str):
return system_prompt, [{"text": system_prompt}]
elif isinstance(system_prompt, list):
# Concatenate all text elements for backwards compatibility; None if no text is found
text_parts = [block["text"] for block in system_prompt if "text" in block]
system_prompt_str = "\n".join(text_parts) if text_parts else None
return system_prompt_str, system_prompt
else:
return None, None

def _append_message(self, message: Message) -> None:
"""Appends a message to the agent's list of messages and invokes the callbacks for the MessageCreatedEvent."""
self.messages.append(message)
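The mapping above is easiest to see end to end. A usage sketch, assuming the package's top-level `Agent` import and that default model construction succeeds in your environment; the prompt text and the cache-point type are illustrative:

from strands import Agent

# Plain string: system_prompt == "You are terse.",
# _system_prompt_content == [{"text": "You are terse."}]
agent_a = Agent(system_prompt="You are terse.")

# Content blocks: _system_prompt_content keeps the list, while system_prompt
# becomes the newline-joined text for backwards compatibility.
agent_b = Agent(
    system_prompt=[
        {"text": "You are terse."},
        {"cachePoint": {"type": "default"}},  # prompt caching on supported models
    ]
)

assert agent_a.system_prompt == agent_b.system_prompt == "You are terse."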
7 changes: 6 additions & 1 deletion src/strands/event_loop/event_loop.py
@@ -335,7 +335,12 @@ async def _handle_model_execution(
tool_specs = agent.tool_registry.get_all_tool_specs()
try:
async for event in stream_messages(
agent.model, agent.system_prompt, agent.messages, tool_specs, structured_output_context.tool_choice
agent.model,
agent.system_prompt,
agent.messages,
tool_specs,
system_prompt_content=agent._system_prompt_content,
tool_choice=structured_output_context.tool_choice,
):
yield event

19 changes: 16 additions & 3 deletions src/strands/event_loop/streaming.py
@@ -22,7 +22,7 @@
TypedEvent,
)
from ..types.citations import CitationsContentBlock
from ..types.content import ContentBlock, Message, Messages
from ..types.content import ContentBlock, Message, Messages, SystemContentBlock
from ..types.streaming import (
ContentBlockDeltaEvent,
ContentBlockStart,
@@ -418,16 +418,22 @@ async def stream_messages(
system_prompt: Optional[str],
messages: Messages,
tool_specs: list[ToolSpec],
*,
tool_choice: Optional[Any] = None,
system_prompt_content: Optional[list[SystemContentBlock]] = None,
**kwargs: Any,
) -> AsyncGenerator[TypedEvent, None]:
"""Streams messages to the model and processes the response.

Args:
model: Model provider.
system_prompt: The system prompt to send.
system_prompt: The system prompt string, used for backwards compatibility with models that expect it.
messages: List of messages to send.
tool_specs: The list of tool specs.
tool_choice: Optional tool choice constraint for forcing specific tool usage.
system_prompt_content: The authoritative system prompt content blocks; these always contain
the system prompt data.
**kwargs: Additional keyword arguments for future extensibility.

Yields:
The reason for stopping, the final message, and the usage metrics
@@ -436,7 +442,14 @@ async def stream_messages(

messages = _normalize_messages(messages)
start_time = time.time()
chunks = model.stream(messages, tool_specs if tool_specs else None, system_prompt, tool_choice=tool_choice)

chunks = model.stream(
messages,
tool_specs if tool_specs else None,
system_prompt,
tool_choice=tool_choice,
system_prompt_content=system_prompt_content,
)

async for event in process_stream(chunks, start_time):
yield event
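One call-site consequence of the bare `*` added above: `tool_choice`, which the event loop previously passed positionally, is now keyword-only along with `system_prompt_content`. A before/after sketch, assuming `model`, `system_prompt`, `messages`, `tool_specs`, and `choice` are already in scope:

# Before this change, positional tool_choice was accepted:
#   stream_messages(model, system_prompt, messages, tool_specs, choice)
# After, the trailing arguments must be passed by keyword:
events = stream_messages(
    model,
    system_prompt,
    messages,
    tool_specs,
    tool_choice=choice,
    system_prompt_content=[{"text": system_prompt}] if system_prompt else None,
)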
36 changes: 25 additions & 11 deletions src/strands/models/bedrock.py
@@ -20,7 +20,7 @@
from ..event_loop import streaming
from ..tools import convert_pydantic_to_tool_spec
from ..tools._tool_helpers import noop_tool
from ..types.content import ContentBlock, Messages
from ..types.content import ContentBlock, Messages, SystemContentBlock
from ..types.exceptions import (
ContextWindowOverflowException,
ModelThrottledException,
@@ -187,11 +187,11 @@ def get_config(self) -> BedrockConfig:
"""
return self.config

def format_request(
def _format_request(
self,
messages: Messages,
tool_specs: Optional[list[ToolSpec]] = None,
system_prompt: Optional[str] = None,
system_prompt_content: Optional[list[SystemContentBlock]] = None,
tool_choice: ToolChoice | None = None,
) -> dict[str, Any]:
"""Format a Bedrock converse stream request.
@@ -201,6 +201,7 @@ def format_request(
tool_specs: List of tool specifications to make available to the model.
system_prompt: System prompt to provide context to the model.
tool_choice: Selection strategy for tool invocation.
system_prompt_content: System prompt content blocks to provide context to the model.

Returns:
A Bedrock converse stream request.
@@ -211,13 +212,20 @@
)
if has_tool_content:
tool_specs = [noop_tool.tool_spec]

# Copy system_prompt_content so cache points can be appended without mutating the caller's list
system_blocks: list[SystemContentBlock] = system_prompt_content.copy() if system_prompt_content else []
# Add cache point if configured (backwards compatibility)
if cache_prompt := self.config.get("cache_prompt"):
warnings.warn(
"cache_prompt is deprecated. Use SystemContentBlock with cachePoint instead.", UserWarning, stacklevel=3
)
system_blocks.append({"cachePoint": {"type": cache_prompt}})

return {
"modelId": self.config["model_id"],
"messages": self._format_bedrock_messages(messages),
"system": [
*([{"text": system_prompt}] if system_prompt else []),
*([{"cachePoint": {"type": self.config["cache_prompt"]}}] if self.config.get("cache_prompt") else []),
],
"system": system_blocks,
**(
{
"toolConfig": {
@@ -590,6 +598,7 @@ async def stream(
system_prompt: Optional[str] = None,
*,
tool_choice: ToolChoice | None = None,
system_prompt_content: Optional[list[SystemContentBlock]] = None,
**kwargs: Any,
) -> AsyncGenerator[StreamEvent, None]:
"""Stream conversation with the Bedrock model.
@@ -602,6 +611,7 @@ async def stream(
tool_specs: List of tool specifications to make available to the model.
system_prompt: System prompt to provide context to the model.
tool_choice: Selection strategy for tool invocation.
system_prompt_content: System prompt content blocks to provide context to the model.
**kwargs: Additional keyword arguments for future extensibility.

Yields:
@@ -620,7 +630,11 @@ def callback(event: Optional[StreamEvent] = None) -> None:
loop = asyncio.get_event_loop()
queue: asyncio.Queue[Optional[StreamEvent]] = asyncio.Queue()

thread = asyncio.to_thread(self._stream, callback, messages, tool_specs, system_prompt, tool_choice)
# Backward compatibility: wrap a bare system_prompt string when no content blocks are provided
if system_prompt and system_prompt_content is None:
system_prompt_content = [{"text": system_prompt}]

thread = asyncio.to_thread(self._stream, callback, messages, tool_specs, system_prompt_content, tool_choice)
task = asyncio.create_task(thread)

while True:
@@ -637,7 +651,7 @@ def _stream(
callback: Callable[..., None],
messages: Messages,
tool_specs: Optional[list[ToolSpec]] = None,
system_prompt: Optional[str] = None,
system_prompt_content: Optional[list[SystemContentBlock]] = None,
tool_choice: ToolChoice | None = None,
) -> None:
"""Stream conversation with the Bedrock model.
@@ -649,7 +663,7 @@ def _stream(
callback: Function to send events to the main thread.
messages: List of message objects to be processed by the model.
tool_specs: List of tool specifications to make available to the model.
system_prompt: System prompt to provide context to the model.
system_prompt_content: System prompt content blocks to provide context to the model.
tool_choice: Selection strategy for tool invocation.

Raises:
@@ -658,7 +672,7 @@
"""
try:
logger.debug("formatting request")
request = self.format_request(messages, tool_specs, system_prompt, tool_choice)
request = self._format_request(messages, tool_specs, system_prompt_content, tool_choice)
logger.debug("request=<%s>", request)

logger.debug("invoking model")
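The system-block assembly in `_format_request` is small enough to exercise standalone. A minimal re-implementation for illustration only (`build_system_blocks` is not part of the repo; it mirrors the copy-then-append and deprecation logic above):

import warnings
from typing import Any, Optional

def build_system_blocks(
    system_prompt_content: Optional[list[dict[str, Any]]],
    cache_prompt: Optional[str],
) -> list[dict[str, Any]]:
    # Copy so the caller's list is never mutated.
    blocks = list(system_prompt_content) if system_prompt_content else []
    if cache_prompt:
        warnings.warn(
            "cache_prompt is deprecated. Use SystemContentBlock with cachePoint instead.",
            UserWarning,
        )
        blocks.append({"cachePoint": {"type": cache_prompt}})
    return blocks

# The deprecated config key and an explicit cachePoint block yield the same payload:
assert build_system_blocks([{"text": "Be brief."}], "default") == [
    {"text": "Be brief."},
    {"cachePoint": {"type": "default"}},
]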
4 changes: 3 additions & 1 deletion src/strands/models/model.py
@@ -6,7 +6,7 @@

from pydantic import BaseModel

from ..types.content import Messages
from ..types.content import Messages, SystemContentBlock
from ..types.streaming import StreamEvent
from ..types.tools import ToolChoice, ToolSpec

@@ -72,6 +72,7 @@ def stream(
system_prompt: Optional[str] = None,
*,
tool_choice: ToolChoice | None = None,
system_prompt_content: list[SystemContentBlock] | None = None,
**kwargs: Any,
) -> AsyncIterable[StreamEvent]:
"""Stream conversation with the model.
@@ -87,6 +88,7 @@ def stream(
tool_specs: List of tool specifications to make available to the model.
system_prompt: System prompt to provide context to the model.
tool_choice: Selection strategy for tool invocation.
system_prompt_content: System prompt content blocks for advanced features like caching.
**kwargs: Additional keyword arguments for future extensibility.

Yields:
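For third-party providers, the interface change suggests accepting the new keyword and falling back to the legacy string, as `BedrockModel.stream` does above. A hypothetical provider sketch (the class and its yielded payload are invented for illustration):

from typing import Any, AsyncGenerator, Optional

class EchoModel:
    async def stream(
        self,
        messages: list[dict[str, Any]],
        tool_specs: Optional[list[Any]] = None,
        system_prompt: Optional[str] = None,
        *,
        tool_choice: Optional[Any] = None,
        system_prompt_content: Optional[list[dict[str, Any]]] = None,
        **kwargs: Any,
    ) -> AsyncGenerator[dict[str, Any], None]:
        # Prefer the authoritative content blocks; wrap the legacy string if absent.
        if system_prompt_content is None and system_prompt:
            system_prompt_content = [{"text": system_prompt}]
        yield {"system": system_prompt_content, "echo": messages}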
4 changes: 2 additions & 2 deletions src/strands/types/content.py
@@ -103,11 +103,11 @@ class SystemContentBlock(TypedDict, total=False):
"""Contains configurations for instructions to provide the model for how to handle input.

Attributes:
guardContent: A content block to assess with the guardrail.
cachePoint: A cache point configuration to optimize conversation history.
text: A system prompt for the model.
"""

guardContent: GuardContent
cachePoint: CachePoint
text: str


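Because `SystemContentBlock` is declared with `total=False`, each block normally carries a single key. Two well-formed blocks, using the module path from this diff (the values are illustrative):

from strands.types.content import SystemContentBlock

text_block: SystemContentBlock = {"text": "You are concise."}
cache_block: SystemContentBlock = {"cachePoint": {"type": "default"}}
system = [text_block, cache_block]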
3 changes: 3 additions & 0 deletions tests/fixtures/mocked_model_provider.py
@@ -58,6 +58,9 @@ async def stream(
tool_specs: Optional[list[ToolSpec]] = None,
system_prompt: Optional[str] = None,
tool_choice: Optional[Any] = None,
*,
system_prompt_content=None,
**kwargs: Any,
) -> AsyncGenerator[Any, None]:
events = self.map_agent_message_to_events(self.agent_responses[self.index])
for event in events: