32 changes: 29 additions & 3 deletions src/strands/agent/agent.py
@@ -57,7 +57,7 @@
 from ..tools.watcher import ToolWatcher
 from ..types._events import AgentResultEvent, InitEventLoopEvent, ModelStreamChunkEvent, ToolInterruptEvent, TypedEvent
 from ..types.agent import AgentInput
-from ..types.content import ContentBlock, Message, Messages
+from ..types.content import ContentBlock, Message, Messages, SystemContentBlock
 from ..types.exceptions import ContextWindowOverflowException
 from ..types.interrupt import InterruptResponseContent
 from ..types.tools import ToolResult, ToolUse
@@ -216,7 +216,7 @@ def __init__(
         model: Union[Model, str, None] = None,
         messages: Optional[Messages] = None,
         tools: Optional[list[Union[str, dict[str, str], "ToolProvider", Any]]] = None,
-        system_prompt: Optional[str] = None,
+        system_prompt: Optional[str | list[SystemContentBlock]] = None,
         structured_output_model: Optional[Type[BaseModel]] = None,
         callback_handler: Optional[
             Union[Callable[..., Any], _DefaultCallbackHandlerSentinel]
@@ -253,6 +253,7 @@ def __init__(
 
                 If provided, only these tools will be available. If None, all tools will be available.
             system_prompt: System prompt to guide model behavior.
+                Can be a string or a list of SystemContentBlock objects for advanced features like caching.
                 If None, the model will behave according to its default settings.
             structured_output_model: Pydantic model type(s) for structured output.
                 When specified, all agent calls will attempt to return structured output of this type.
@@ -287,7 +288,8 @@ def __init__(
         """
         self.model = BedrockModel() if not model else BedrockModel(model_id=model) if isinstance(model, str) else model
         self.messages = messages if messages is not None else []
-        self.system_prompt = system_prompt
+        # initializing self.system_prompt for backwards compatibility
+        self.system_prompt, self._system_prompt_content = self._initialize_system_prompt(system_prompt)
         self._default_structured_output_model = structured_output_model
         self.agent_id = _identifier.validate(agent_id or _DEFAULT_AGENT_ID, _identifier.Identifier.AGENT)
         self.name = name or _DEFAULT_AGENT_NAME
@@ -965,6 +967,30 @@ def _filter_tool_parameters_for_recording(self, tool_name: str, input_params: di
         properties = tool_spec["inputSchema"]["json"]["properties"]
         return {k: v for k, v in input_params.items() if k in properties}
 
+    def _initialize_system_prompt(
+        self, system_prompt: str | list[SystemContentBlock] | None
+    ) -> tuple[str | None, list[SystemContentBlock] | None]:
+        """Initialize system prompt fields from constructor input.
+
+        Maintains backwards compatibility by keeping system_prompt as a str when string input
+        is provided, avoiding breaking existing consumers.
+
+        Maps system_prompt input to both string and content block representations:
+        - If string: system_prompt=string, _system_prompt_content=[{text: string}]
+        - If list with text elements: system_prompt=concatenated_text, _system_prompt_content=list
+        - If list without text elements: system_prompt=None, _system_prompt_content=list
+        - If None: system_prompt=None, _system_prompt_content=None
+        """
+        if isinstance(system_prompt, str):
+            return system_prompt, [{"text": system_prompt}]
+        elif isinstance(system_prompt, list):
+            # Concatenate all text elements for backwards compatibility, None if no text found
+            text_parts = [block["text"] for block in system_prompt if "text" in block]
+            system_prompt_str = "\n".join(text_parts) if text_parts else None
+            return system_prompt_str, system_prompt
+        else:
+            return None, None
+
     def _append_message(self, message: Message) -> None:
         """Appends a message to the agent's list of messages and invokes the callbacks for the MessageCreatedEvent."""
         self.messages.append(message)

Review thread on _initialize_system_prompt:

Member: What if instead we just did `None, system_prompt`? Is there a reason we have to force-generate a self.system_prompt when a customer passes in a list? They are opting in to a new feature, so I wouldn't expect it to break them.

Member Author: When we call model.stream we still need to pass system_prompt. Even if a user opted in to passing this additional content, that should not break a model implementation. So my thinking is that we need the system_prompt_content -> system_prompt transformation regardless. It is better to keep it and expose it as agent.system_prompt, so the change is minimally breaking in the event a user is reading agent.system_prompt, or a session_manager, hook, model, etc. is itself making a call to agent.system_prompt.

Member: Ah, gotcha. So rather than requiring them to make changes in multiple places at once, they can migrate gradually or be content with the transformation.
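For illustration, a minimal sketch of the mapping this method produces; the behavior mirrors the tests added in test_agent.py below, and the top-level `strands` import path is an assumption:

```python
from strands import Agent  # import path assumed

agent = Agent(
    system_prompt=[
        {"text": "You are a helpful assistant."},
        {"cachePoint": {"type": "default"}},
        {"text": "Additional instructions."},
    ]
)

# Text blocks are concatenated with "\n" for backwards compatibility.
assert agent.system_prompt == "You are a helpful assistant.\nAdditional instructions."
# The full block list, including the cachePoint, is preserved internally.
assert agent._system_prompt_content[1] == {"cachePoint": {"type": "default"}}
```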
7 changes: 6 additions & 1 deletion src/strands/event_loop/event_loop.py
@@ -335,7 +335,12 @@ async def _handle_model_execution(
     tool_specs = agent.tool_registry.get_all_tool_specs()
     try:
         async for event in stream_messages(
-            agent.model, agent.system_prompt, agent.messages, tool_specs, structured_output_context.tool_choice
+            agent.model,
+            agent.system_prompt,
+            agent.messages,
+            tool_specs,
+            system_prompt_content=agent._system_prompt_content,
+            tool_choice=structured_output_context.tool_choice,
         ):
             yield event
 
19 changes: 16 additions & 3 deletions src/strands/event_loop/streaming.py
@@ -22,7 +22,7 @@
     TypedEvent,
 )
 from ..types.citations import CitationsContentBlock
-from ..types.content import ContentBlock, Message, Messages
+from ..types.content import ContentBlock, Message, Messages, SystemContentBlock
 from ..types.streaming import (
     ContentBlockDeltaEvent,
     ContentBlockStart,
@@ -418,16 +418,22 @@ async def stream_messages(
     system_prompt: Optional[str],
     messages: Messages,
     tool_specs: list[ToolSpec],
+    *,
     tool_choice: Optional[Any] = None,
+    system_prompt_content: Optional[list[SystemContentBlock]] = None,
+    **kwargs: Any,
 ) -> AsyncGenerator[TypedEvent, None]:
     """Streams messages to the model and processes the response.
 
     Args:
         model: Model provider.
-        system_prompt: The system prompt to send.
+        system_prompt: The system prompt string, used for backwards compatibility with models that expect it.
         messages: List of messages to send.
         tool_specs: The list of tool specs.
         tool_choice: Optional tool choice constraint for forcing specific tool usage.
+        system_prompt_content: The authoritative system prompt content blocks that always contain the
+            system prompt data.
+        **kwargs: Additional keyword arguments for future extensibility.
 
     Yields:
         The reason for stopping, the final message, and the usage metrics
@@ -436,7 +442,14 @@
 
     messages = _normalize_messages(messages)
     start_time = time.time()
-    chunks = model.stream(messages, tool_specs if tool_specs else None, system_prompt, tool_choice=tool_choice)
+
+    chunks = model.stream(
+        messages,
+        tool_specs if tool_specs else None,
+        system_prompt,
+        tool_choice=tool_choice,
+        system_prompt_content=system_prompt_content,
+    )
 
     async for event in process_stream(chunks, start_time):
         yield event
32 changes: 21 additions & 11 deletions src/strands/models/bedrock.py
@@ -20,7 +20,7 @@
 from ..event_loop import streaming
 from ..tools import convert_pydantic_to_tool_spec
 from ..tools._tool_helpers import noop_tool
-from ..types.content import ContentBlock, Messages
+from ..types.content import ContentBlock, Messages, SystemContentBlock
 from ..types.exceptions import (
     ContextWindowOverflowException,
     ModelThrottledException,
@@ -187,11 +187,11 @@ def get_config(self) -> BedrockConfig:
         """
         return self.config
 
-    def format_request(
+    def _format_request(
         self,
         messages: Messages,
         tool_specs: Optional[list[ToolSpec]] = None,
-        system_prompt: Optional[str] = None,
+        system_prompt_content: Optional[list[SystemContentBlock]] = None,
         tool_choice: ToolChoice | None = None,
     ) -> dict[str, Any]:
         """Format a Bedrock converse stream request.
@@ -201,6 +201,7 @@ def format_request(
             tool_specs: List of tool specifications to make available to the model.
             system_prompt: System prompt to provide context to the model.
             tool_choice: Selection strategy for tool invocation.
+            system_prompt_content: System prompt content blocks to provide context to the model.
 
         Returns:
             A Bedrock converse stream request.
@@ -211,13 +212,20 @@
         )
         if has_tool_content:
             tool_specs = [noop_tool.tool_spec]
+
+        # Use system_prompt_content directly (copy for mutability)
+        system_blocks: list[SystemContentBlock] = system_prompt_content.copy() if system_prompt_content else []
+        # Add cache point if configured (backwards compatibility)
+        if cache_prompt := self.config.get("cache_prompt"):
+            warnings.warn(
+                "cache_prompt is deprecated. Use SystemContentBlock with cachePoint instead.", UserWarning, stacklevel=3
+            )
+            system_blocks.append({"cachePoint": {"type": cache_prompt}})
+
         return {
             "modelId": self.config["model_id"],
             "messages": self._format_bedrock_messages(messages),
-            "system": [
-                *([{"text": system_prompt}] if system_prompt else []),
-                *([{"cachePoint": {"type": self.config["cache_prompt"]}}] if self.config.get("cache_prompt") else []),
-            ],
+            "system": system_blocks,
             **(
                 {
                     "toolConfig": {

Review thread on the cache_prompt handling:

Member: What happens if someone already specified cache points in their system_prompt_content? Won't this lead to duplicated entries?

Member Author (@dbschmigelski, Oct 30, 2025): Yes, but it won't break; it still works. And since it does, I wanted to avoid making assumptions about ordering, for example deduping when cache_prompt is present and the last item is a CachePoint. It doesn't break, and we emit a warning. Adding a test to demonstrate this.
@@ -590,6 +598,7 @@ async def stream(
         system_prompt: Optional[str] = None,
         *,
         tool_choice: ToolChoice | None = None,
+        system_prompt_content: Optional[list[SystemContentBlock]] = None,
         **kwargs: Any,
     ) -> AsyncGenerator[StreamEvent, None]:
         """Stream conversation with the Bedrock model.
@@ -602,6 +611,7 @@
             tool_specs: List of tool specifications to make available to the model.
             system_prompt: System prompt to provide context to the model.
             tool_choice: Selection strategy for tool invocation.
+            system_prompt_content: System prompt content blocks to provide context to the model.
             **kwargs: Additional keyword arguments for future extensibility.
 
         Yields:
@@ -620,7 +630,7 @@ def callback(event: Optional[StreamEvent] = None) -> None:
         loop = asyncio.get_event_loop()
         queue: asyncio.Queue[Optional[StreamEvent]] = asyncio.Queue()
 
-        thread = asyncio.to_thread(self._stream, callback, messages, tool_specs, system_prompt, tool_choice)
+        thread = asyncio.to_thread(self._stream, callback, messages, tool_specs, system_prompt_content, tool_choice)
         task = asyncio.create_task(thread)
 
         while True:
@@ -637,7 +647,7 @@ def _stream(
         callback: Callable[..., None],
         messages: Messages,
         tool_specs: Optional[list[ToolSpec]] = None,
-        system_prompt: Optional[str] = None,
+        system_prompt_content: Optional[list[SystemContentBlock]] = None,
         tool_choice: ToolChoice | None = None,
     ) -> None:
         """Stream conversation with the Bedrock model.
@@ -649,7 +659,7 @@ def _stream(
             callback: Function to send events to the main thread.
             messages: List of message objects to be processed by the model.
             tool_specs: List of tool specifications to make available to the model.
-            system_prompt: System prompt to provide context to the model.
+            system_prompt_content: System prompt content blocks to provide context to the model.
             tool_choice: Selection strategy for tool invocation.
 
         Raises:
@@ -658,7 +668,7 @@ def _stream(
         """
         try:
             logger.debug("formatting request")
-            request = self.format_request(messages, tool_specs, system_prompt, tool_choice)
+            request = self._format_request(messages, tool_specs, system_prompt_content, tool_choice)
             logger.debug("request=<%s>", request)
 
             logger.debug("invoking model")
4 changes: 3 additions & 1 deletion src/strands/models/model.py
@@ -6,7 +6,7 @@
 
 from pydantic import BaseModel
 
-from ..types.content import Messages
+from ..types.content import Messages, SystemContentBlock
 from ..types.streaming import StreamEvent
 from ..types.tools import ToolChoice, ToolSpec
 
@@ -72,6 +72,7 @@ def stream(
         system_prompt: Optional[str] = None,
         *,
         tool_choice: ToolChoice | None = None,
+        system_prompt_content: list[SystemContentBlock] | None = None,
         **kwargs: Any,
     ) -> AsyncIterable[StreamEvent]:
         """Stream conversation with the model.
@@ -87,6 +88,7 @@ def stream(
             tool_specs: List of tool specifications to make available to the model.
             system_prompt: System prompt to provide context to the model.
             tool_choice: Selection strategy for tool invocation.
+            system_prompt_content: System prompt content blocks for advanced features like caching.
             **kwargs: Additional keyword arguments for future extensibility.
 
         Yields:

Review thread on the new parameter:

Member Author: Noting that I do not like this. But we would be breaking if we did something like

    system_prompt: Optional[str | list[SystemContentBlock]] = None,

Even if we only passed in a str when the user passed in a str, which was considered, the typing is still breaking for mypy users.
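As a sketch of why the keyword-only addition stays non-breaking (the class name and event payload below are illustrative, not from this diff): a provider written before this change keeps working because `system_prompt_content` is absorbed by `**kwargs`, while the concatenated string still arrives via `system_prompt`.

```python
from typing import Any, AsyncIterable, Optional

from strands.types.content import Messages
from strands.types.streaming import StreamEvent
from strands.types.tools import ToolChoice, ToolSpec


class LegacyProvider:
    """Hypothetical provider that predates system_prompt_content."""

    async def stream(
        self,
        messages: Messages,
        tool_specs: Optional[list[ToolSpec]] = None,
        system_prompt: Optional[str] = None,
        *,
        tool_choice: ToolChoice | None = None,
        **kwargs: Any,  # system_prompt_content lands here and is safely ignored
    ) -> AsyncIterable[StreamEvent]:
        # Still receives the backwards-compatible string via system_prompt,
        # so the event loop can call this provider unchanged.
        yield {"messageStop": {"stopReason": "end_turn"}}
```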
4 changes: 2 additions & 2 deletions src/strands/types/content.py
@@ -103,11 +103,11 @@ class SystemContentBlock(TypedDict, total=False):
     """Contains configurations for instructions to provide the model for how to handle input.
 
     Attributes:
-        guardContent: A content block to assess with the guardrail.
+        cachePoint: A cache point configuration to optimize conversation history.
         text: A system prompt for the model.
     """
 
-    guardContent: GuardContent
+    cachePoint: CachePoint
     text: str
 
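For reference, a small illustration of constructing these blocks (assumed usage: since SystemContentBlock is a TypedDict with total=False, plain dicts carrying any subset of the keys type-check):

```python
from strands.types.content import SystemContentBlock

system_blocks: list[SystemContentBlock] = [
    {"text": "You are a helpful assistant."},
    {"cachePoint": {"type": "default"}},  # marks a prompt-caching boundary
]
```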
3 changes: 3 additions & 0 deletions tests/fixtures/mocked_model_provider.py
@@ -58,6 +58,9 @@ async def stream(
         tool_specs: Optional[list[ToolSpec]] = None,
         system_prompt: Optional[str] = None,
         tool_choice: Optional[Any] = None,
+        *,
+        system_prompt_content=None,
+        **kwargs: Any,
     ) -> AsyncGenerator[Any, None]:
         events = self.map_agent_message_to_events(self.agent_responses[self.index])
         for event in events:
80 changes: 80 additions & 0 deletions tests/strands/agent/test_agent.py
@@ -330,6 +330,7 @@ def test_agent__call__(
                 [tool.tool_spec],
                 system_prompt,
                 tool_choice=None,
+                system_prompt_content=[{"text": system_prompt}],
             ),
             unittest.mock.call(
                 [
@@ -367,6 +368,7 @@ def test_agent__call__(
                 [tool.tool_spec],
                 system_prompt,
                 tool_choice=None,
+                system_prompt_content=[{"text": system_prompt}],
             ),
         ],
     )
@@ -487,6 +489,7 @@ def test_agent__call__retry_with_reduced_context(mock_model, agent, tool, agener
         unittest.mock.ANY,
         unittest.mock.ANY,
         tool_choice=None,
+        system_prompt_content=unittest.mock.ANY,
     )
 
     conversation_manager_spy.reduce_context.assert_called_once()
@@ -631,6 +634,7 @@ def test_agent__call__retry_with_overwritten_tool(mock_model, agent, tool, agene
         unittest.mock.ANY,
         unittest.mock.ANY,
         tool_choice=None,
+        system_prompt_content=unittest.mock.ANY,
    )
 
     assert conversation_manager_spy.reduce_context.call_count == 2
@@ -2160,3 +2164,79 @@ def shell(command: str):
 
     # And that it continued to the LLM call
     assert agent.messages[-1] == {"content": [{"text": "I invoked a tool!"}], "role": "assistant"}
+
+
+def test_agent_string_system_prompt():
+    """Test initialization with string system prompt."""
+    system_prompt = "You are a helpful assistant."
+    agent = Agent(system_prompt=system_prompt)
+
+    assert agent.system_prompt == system_prompt
+    assert agent._system_prompt_content == [{"text": system_prompt}]
+
+
+def test_agent_single_text_block_system_prompt():
+    """Test initialization with single text SystemContentBlock."""
+    text = "You are a helpful assistant."
+    system_prompt_content = [{"text": text}]
+    agent = Agent(system_prompt=system_prompt_content)
+
+    assert agent.system_prompt == text
+    assert agent._system_prompt_content == system_prompt_content
+
+
+def test_agent_multiple_blocks_system_prompt():
+    """Test initialization with multiple SystemContentBlocks."""
+    system_prompt_content = [
+        {"text": "You are a helpful assistant."},
+        {"cachePoint": {"type": "default"}},
+        {"text": "Additional instructions."},
+    ]
+    agent = Agent(system_prompt=system_prompt_content)
+
+    assert agent.system_prompt == "You are a helpful assistant.\nAdditional instructions."
+    assert agent._system_prompt_content == system_prompt_content
+
+
+def test_agent_single_non_text_block_system_prompt():
+    """Test initialization with single non-text SystemContentBlock."""
+    system_prompt_content = [{"cachePoint": {"type": "default"}}]
+    agent = Agent(system_prompt=system_prompt_content)
+
+    assert agent.system_prompt is None
+    assert agent._system_prompt_content == system_prompt_content
+
+
+def test_agent_none_system_prompt():
+    """Test initialization with None system prompt."""
+    agent = Agent(system_prompt=None)
+
+    assert agent.system_prompt is None
+    assert agent._system_prompt_content is None
+
+
+def test_agent_empty_list_system_prompt():
+    """Test initialization with empty list system prompt."""
+    agent = Agent(system_prompt=[])
+
+    assert agent.system_prompt is None
+    assert agent._system_prompt_content == []
+
+
+def test_agent_backwards_compatibility_string_access():
+    """Test that string system prompts maintain backwards compatibility."""
+    system_prompt = "You are a helpful assistant."
+    agent = Agent(system_prompt=system_prompt)
+
+    # Should be able to access as string for backwards compatibility
+    assert agent.system_prompt == system_prompt
+
+
+def test_agent_backwards_compatibility_single_text_block():
+    """Test that single text blocks maintain backwards compatibility."""
+    text = "You are a helpful assistant."
+    system_prompt_content = [{"text": text}]
+    agent = Agent(system_prompt=system_prompt_content)
+
+    # Should extract text for backwards compatibility
+    assert agent.system_prompt == text