Commit a8e36a6

linting
1 parent 0eb2f2b commit a8e36a6

File tree

12 files changed, +61 -78 lines


src/strands/agent/agent.py

Lines changed: 0 additions & 1 deletion

@@ -967,7 +967,6 @@ def _filter_tool_parameters_for_recording(self, tool_name: str, input_params: di
         properties = tool_spec["inputSchema"]["json"]["properties"]
         return {k: v for k, v in input_params.items() if k in properties}
 
-
     def _initialize_system_prompt(
         self, system_prompt: str | list[SystemContentBlock] | None
     ) -> tuple[str | None, list[SystemContentBlock] | None]:

src/strands/event_loop/event_loop.py

Lines changed: 3 additions & 3 deletions

@@ -335,12 +335,12 @@ async def _handle_model_execution(
     tool_specs = agent.tool_registry.get_all_tool_specs()
     try:
         async for event in stream_messages(
-            agent.model,
+            agent.model,
             agent.system_prompt,
-            agent.messages,
+            agent.messages,
             tool_specs,
             system_prompt_content=agent._system_prompt_content,
-            tool_choice=structured_output_context.tool_choice
+            tool_choice=structured_output_context.tool_choice,
         ):
             yield event

src/strands/event_loop/streaming.py

Lines changed: 9 additions & 6 deletions

@@ -421,7 +421,7 @@ async def stream_messages(
     *,
     tool_choice: Optional[Any] = None,
     system_prompt_content: Optional[list[SystemContentBlock]] = None,
-    **kwargs: Any
+    **kwargs: Any,
 ) -> AsyncGenerator[TypedEvent, None]:
     """Streams messages to the model and processes the response.
 
@@ -431,19 +431,22 @@ async def stream_messages(
         messages: List of messages to send.
         tool_specs: The list of tool specs.
         tool_choice: Optional tool choice constraint for forcing specific tool usage.
-        system_prompt_content: The authoritative system prompt content blocks that always contains the system prompt data.
+        system_prompt_content: The authoritative system prompt content blocks that always contains the
+            system prompt data.
+        **kwargs: Additional keyword arguments for future extensibility.
+
     Yields:
         The reason for stopping, the final message, and the usage metrics
     """
     logger.debug("model=<%s> | streaming messages", model)
 
     messages = _normalize_messages(messages)
     start_time = time.time()
-
+
     chunks = model.stream(
-        messages,
-        tool_specs if tool_specs else None,
-        system_prompt,
+        messages,
+        tool_specs if tool_specs else None,
+        system_prompt,
         tool_choice=tool_choice,
         system_prompt_content=system_prompt_content,
     )
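
For orientation, a minimal usage sketch of the stream_messages signature touched above. The SystemContentBlock shapes mirror the tests later in this commit; the message shape and the model object are assumptions, not part of the diff.

# Hypothetical caller sketch (assumed shapes; see the tests further down).
from strands.event_loop.streaming import stream_messages

async def run(model):
    system_prompt_content = [
        {"text": "You are a helpful assistant."},
        {"cachePoint": {"type": "default"}},  # prompt-cache boundary block
    ]
    messages = [{"role": "user", "content": [{"text": "Hello!"}]}]  # assumed shape

    # Positional order (model, system_prompt, messages, tool_specs) follows
    # the call site in event_loop.py above; the block list is keyword-only.
    async for event in stream_messages(
        model,
        "You are a helpful assistant.",
        messages,
        None,  # no tool specs
        system_prompt_content=system_prompt_content,
    ):
        print(event)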

src/strands/models/bedrock.py

Lines changed: 2 additions & 3 deletions

@@ -218,9 +218,7 @@ def _format_request(
         # Add cache point if configured (backwards compatibility)
         if self.config.get("cache_prompt"):
             warnings.warn(
-                "cache_prompt is deprecated. Use SystemContentBlock with cachePoint instead.",
-                UserWarning,
-                stacklevel=3
+                "cache_prompt is deprecated. Use SystemContentBlock with cachePoint instead.", UserWarning, stacklevel=3
             )
             system_blocks.append({"cachePoint": {"type": self.config["cache_prompt"]}})
 
@@ -613,6 +611,7 @@ async def stream(
             tool_specs: List of tool specifications to make available to the model.
             system_prompt: System prompt to provide context to the model.
             tool_choice: Selection strategy for tool invocation.
+            system_prompt_content: System prompt content blocks to provide context to the model.
             **kwargs: Additional keyword arguments for future extensibility.
 
         Yields:
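
The deprecation message above points at a migration; here is a hedged sketch of what it looks like, using only block shapes that appear in this commit's tests (the strands import path is an assumption):

# Before: cache point configured on the model, which now emits a
# UserWarning when the request is formatted:
#     model.update_config(cache_prompt="default")
#
# After: express the cache point directly as a SystemContentBlock.
from strands import Agent  # import path assumed

system_prompt_content = [
    {"text": "You are a helpful assistant."},
    {"cachePoint": {"type": "default"}},  # replaces cache_prompt="default"
]
agent = Agent(system_prompt=system_prompt_content)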

tests/fixtures/mocked_model_provider.py

Lines changed: 1 addition & 1 deletion

@@ -60,7 +60,7 @@ async def stream(
         tool_choice: Optional[Any] = None,
         *,
         system_prompt_content=None,
-        **kwargs: Any
+        **kwargs: Any,
     ) -> AsyncGenerator[Any, None]:
         events = self.map_agent_message_to_events(self.agent_responses[self.index])
         for event in events:

tests/strands/agent/test_agent.py

Lines changed: 11 additions & 12 deletions

@@ -2164,11 +2164,13 @@ def shell(command: str):
 
     # And that it continued to the LLM call
     assert agent.messages[-1] == {"content": [{"text": "I invoked a tool!"}], "role": "assistant"}
+
+
 def test_agent_string_system_prompt():
     """Test initialization with string system prompt."""
     system_prompt = "You are a helpful assistant."
     agent = Agent(system_prompt=system_prompt)
-
+
     assert agent.system_prompt == system_prompt
     assert agent._system_prompt_content == [{"text": system_prompt}]
 
@@ -2178,7 +2180,7 @@ def test_agent_single_text_block_system_prompt():
     text = "You are a helpful assistant."
     system_prompt_content = [{"text": text}]
     agent = Agent(system_prompt=system_prompt_content)
-
+
     assert agent.system_prompt == text
     assert agent._system_prompt_content == system_prompt_content
 
@@ -2188,10 +2190,10 @@ def test_agent_multiple_blocks_system_prompt():
     system_prompt_content = [
         {"text": "You are a helpful assistant."},
         {"cachePoint": {"type": "default"}},
-        {"text": "Additional instructions."}
+        {"text": "Additional instructions."},
     ]
     agent = Agent(system_prompt=system_prompt_content)
-
+
     assert agent.system_prompt == "You are a helpful assistant.\nAdditional instructions."
     assert agent._system_prompt_content == system_prompt_content
 
@@ -2200,23 +2202,23 @@ def test_agent_single_non_text_block_system_prompt():
     """Test initialization with single non-text SystemContentBlock."""
     system_prompt_content = [{"cachePoint": {"type": "default"}}]
     agent = Agent(system_prompt=system_prompt_content)
-
+
     assert agent.system_prompt is None
     assert agent._system_prompt_content == system_prompt_content
 
 
 def test_agent_none_system_prompt():
     """Test initialization with None system prompt."""
     agent = Agent(system_prompt=None)
-
+
     assert agent.system_prompt is None
     assert agent._system_prompt_content is None
 
 
 def test_agent_empty_list_system_prompt():
     """Test initialization with empty list system prompt."""
     agent = Agent(system_prompt=[])
-
+
     assert agent.system_prompt is None
     assert agent._system_prompt_content == []
 
@@ -2225,7 +2227,7 @@ def test_agent_backwards_compatibility_string_access():
     """Test that string system prompts maintain backwards compatibility."""
     system_prompt = "You are a helpful assistant."
     agent = Agent(system_prompt=system_prompt)
-
+
     # Should be able to access as string for backwards compatibility
     assert agent.system_prompt == system_prompt
 
@@ -2235,9 +2237,6 @@ def test_agent_backwards_compatibility_single_text_block():
     text = "You are a helpful assistant."
     system_prompt_content = [{"text": text}]
     agent = Agent(system_prompt=system_prompt_content)
-
+
     # Should extract text for backwards compatibility
     assert agent.system_prompt == text
-
-
-
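
Read together, these tests pin down the normalization contract between system_prompt and _system_prompt_content; a brief illustrative sketch follows (import path assumed; _system_prompt_content is an internal attribute):

from strands import Agent  # import path assumed

# A plain string is kept and also wrapped into a single text block.
a = Agent(system_prompt="Be brief.")
assert a.system_prompt == "Be brief."
assert a._system_prompt_content == [{"text": "Be brief."}]

# Blocks with no text yield no legacy string...
b = Agent(system_prompt=[{"cachePoint": {"type": "default"}}])
assert b.system_prompt is None

# ...while text blocks are joined with newlines for the legacy accessor.
c = Agent(system_prompt=[{"text": "One."}, {"text": "Two."}])
assert c.system_prompt == "One.\nTwo."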

tests/strands/event_loop/test_streaming.py

Lines changed: 1 addition & 4 deletions

@@ -861,10 +861,7 @@ async def test_stream_messages_with_system_prompt_content(agenerator, alist):
         ]
     )
 
-    system_prompt_content = [
-        {"text": "You are a helpful assistant."},
-        {"cachePoint": {"type": "default"}}
-    ]
+    system_prompt_content = [{"text": "You are a helpful assistant."}, {"cachePoint": {"type": "default"}}]
 
     stream = strands.event_loop.streaming.stream_messages(
         mock_model,

tests/strands/models/test_bedrock.py

Lines changed: 15 additions & 22 deletions

@@ -409,11 +409,8 @@ def test_format_request_system_prompt(model, messages, model_id, system_prompt):
 
 def test_format_request_system_prompt_content(model, messages, model_id):
     """Test _format_request with SystemContentBlock input."""
-    system_prompt_content = [
-        {"text": "You are a helpful assistant."},
-        {"cachePoint": {"type": "default"}}
-    ]
-
+    system_prompt_content = [{"text": "You are a helpful assistant."}, {"cachePoint": {"type": "default"}}]
+
     tru_request = model._format_request(messages, system_prompt_content=system_prompt_content)
     exp_request = {
         "inferenceConfig": {},
 
@@ -429,18 +426,15 @@ def test_format_request_system_prompt_content_with_cache_prompt_config(model, me
     """Test _format_request with SystemContentBlock and cache_prompt config (backwards compatibility)."""
     system_prompt_content = [{"text": "You are a helpful assistant."}]
     model.update_config(cache_prompt="default")
-
+
     with pytest.warns(UserWarning, match="cache_prompt is deprecated"):
         tru_request = model._format_request(messages, system_prompt_content=system_prompt_content)
-
+
     exp_request = {
         "inferenceConfig": {},
         "modelId": model_id,
         "messages": messages,
-        "system": [
-            {"text": "You are a helpful assistant."},
-            {"cachePoint": {"type": "default"}}
-        ],
+        "system": [{"text": "You are a helpful assistant."}, {"cachePoint": {"type": "default"}}],
     }
 
     assert tru_request == exp_request
 
@@ -528,10 +522,10 @@ def test_format_request_tool_choice_tool(model, messages, model_id, tool_spec):
 
 def test_format_request_cache(model, messages, model_id, tool_spec, cache_type):
     model.update_config(cache_prompt=cache_type, cache_tools=cache_type)
-
+
     with pytest.warns(UserWarning, match="cache_prompt is deprecated"):
         tru_request = model._format_request(messages, tool_specs=[tool_spec])
-
+
     exp_request = {
         "inferenceConfig": {},
         "modelId": model_id,
 
@@ -668,18 +662,15 @@ async def test_stream(bedrock_client, model, messages, tool_spec, model_id, addi
 async def test_stream_with_system_prompt_content(bedrock_client, model, messages, alist):
     """Test stream method with system_prompt_content parameter."""
     bedrock_client.converse_stream.return_value = {"stream": ["e1", "e2"]}
-
-    system_prompt_content = [
-        {"text": "You are a helpful assistant."},
-        {"cachePoint": {"type": "default"}}
-    ]
+
+    system_prompt_content = [{"text": "You are a helpful assistant."}, {"cachePoint": {"type": "default"}}]
 
     response = model.stream(messages, system_prompt_content=system_prompt_content)
     tru_chunks = await alist(response)
     exp_chunks = ["e1", "e2"]
 
     assert tru_chunks == exp_chunks
-
+
     # Verify the request was formatted with system_prompt_content
     expected_request = {
         "inferenceConfig": {},
 
@@ -694,12 +685,14 @@ async def test_stream_with_system_prompt_content(bedrock_client, model, messages
 async def test_stream_backwards_compatibility_single_text_block(bedrock_client, model, messages, alist):
     """Test that single text block in system_prompt_content works with legacy system_prompt."""
     bedrock_client.converse_stream.return_value = {"stream": ["e1", "e2"]}
-
+
     system_prompt_content = [{"text": "You are a helpful assistant."}]
 
-    response = model.stream(messages, system_prompt="You are a helpful assistant.", system_prompt_content=system_prompt_content)
+    response = model.stream(
+        messages, system_prompt="You are a helpful assistant.", system_prompt_content=system_prompt_content
+    )
     await alist(response)
-
+
     # Verify the request was formatted with system_prompt_content
     expected_request = {
         "inferenceConfig": {},

tests_integ/models/test_model_bedrock.py

Lines changed: 5 additions & 8 deletions

@@ -274,12 +274,9 @@ def test_multi_prompt_system_content():
     system_prompt_content = [
         {"text": "You are a helpful assistant."},
         {"text": "Always be concise."},
-        {"text": "End responses with 'Done.'"}
+        {"text": "End responses with 'Done.'"},
     ]
-
-    agent = Agent(
-        system_prompt=system_prompt_content,
-        load_tools_from_directory=False
-    )
-    result = agent("Hello!")
-    # just verifying there is no failure
+
+    agent = Agent(system_prompt=system_prompt_content, load_tools_from_directory=False)
+    # just verifying there is no failure
+    agent("Hello!")

tests_integ/models/test_model_openai.py

Lines changed: 3 additions & 6 deletions

@@ -225,12 +225,9 @@ def test_rate_limit_throttling_integration_no_retries(model):
 
 def test_content_blocks_handling(model):
     """Test that content blocks are handled properly without failures."""
-    content = [
-        {"text": "What is 2+2?"},
-        {"text": "Please be brief."}
-    ]
-
+    content = [{"text": "What is 2+2?"}, {"text": "Please be brief."}]
+
     agent = Agent(model=model, load_tools_from_directory=False)
     result = agent(content)
-
+
     assert "4" in result.message["content"][0]["text"]
