1414from pydantic import BaseModel
1515from typing_extensions import Unpack , override
1616
17- from ..types .content import ContentBlock , Messages , SystemContentBlock
17+ from ..types .content import ContentBlock , Messages
1818from ..types .exceptions import ContextWindowOverflowException , ModelThrottledException
1919from ..types .streaming import StreamEvent
2020from ..types .tools import ToolChoice , ToolResult , ToolSpec , ToolUse
@@ -198,21 +198,12 @@ def _format_request_tool_choice(cls, tool_choice: ToolChoice | None) -> dict[str
198198 return {"tool_choice" : "auto" }
199199
200200 @classmethod
201- def format_request_messages (
202- cls ,
203- messages : Messages ,
204- system_prompt : Optional [str ] = None ,
205- * ,
206- system_prompt_content : Optional [list [SystemContentBlock ]] = None ,
207- ** kwargs : Any ,
208- ) -> list [dict [str , Any ]]:
201+ def format_request_messages (cls , messages : Messages , system_prompt : Optional [str ] = None ) -> list [dict [str , Any ]]:
209202 """Format an OpenAI compatible messages array.
210203
211204 Args:
212205 messages: List of message objects to be processed by the model.
213206 system_prompt: System prompt to provide context to the model.
214- system_prompt_content: Structured system prompt content blocks (for advanced use cases).
215- **kwargs: Additional keyword arguments for future extensibility.
216207
217208 Returns:
218209 An OpenAI compatible messages array.
@@ -253,17 +244,14 @@ def format_request(
253244 tool_specs : Optional [list [ToolSpec ]] = None ,
254245 system_prompt : Optional [str ] = None ,
255246 tool_choice : ToolChoice | None = None ,
256- system_prompt_content : Optional [list [SystemContentBlock ]] = None ,
257247 ) -> dict [str , Any ]:
258248 """Format an OpenAI compatible chat streaming request.
259249
260250 Args:
261251 messages: List of message objects to be processed by the model.
262252 tool_specs: List of tool specifications to make available to the model.
263- system_prompt: System prompt to provide context to the model. When system_prompt_content
264- is provided, this should contain the flattened text for legacy subclass compatibility.
253+ system_prompt: System prompt to provide context to the model.
265254 tool_choice: Selection strategy for tool invocation.
266- system_prompt_content: Structured system prompt content blocks.
267255
268256 Returns:
269257 An OpenAI compatible chat streaming request.
@@ -272,27 +260,8 @@ def format_request(
272260 TypeError: If a message contains a content block type that cannot be converted to an OpenAI-compatible
273261 format.
274262 """
275- # Handle system prompt content with backwards compatibility
276- # LEGACY COMPATIBILITY: The try/except approach is needed because:
277- # 1. Some subclasses may override format_request_messages() with the old signature:
278- # format_request_messages(cls, messages: Messages, system_prompt: Optional[str] = None)
279- # 2. Calling with system_prompt_content kwarg would fail on legacy overrides
280- # 3. This provides graceful fallback for existing subclass implementations
281- if system_prompt_content :
282- try :
283- # Try new signature with system_prompt_content parameter
284- messages_formatted = self .format_request_messages (
285- messages , system_prompt , system_prompt_content = system_prompt_content
286- )
287- except TypeError :
288- # Fallback for legacy subclass overrides that don't support system_prompt_content
289- # Use system_prompt which should be populated for legacy compatibility
290- messages_formatted = self .format_request_messages (messages , system_prompt )
291- else :
292- messages_formatted = self .format_request_messages (messages , system_prompt )
293-
294263 return {
295- "messages" : messages_formatted ,
264+ "messages" : self . format_request_messages ( messages , system_prompt ) ,
296265 "model" : self .config ["model_id" ],
297266 "stream" : True ,
298267 "stream_options" : {"include_usage" : True },
@@ -391,7 +360,6 @@ async def stream(
391360 system_prompt : Optional [str ] = None ,
392361 * ,
393362 tool_choice : ToolChoice | None = None ,
394- system_prompt_content : Optional [list [SystemContentBlock ]] = None ,
395363 ** kwargs : Any ,
396364 ) -> AsyncGenerator [StreamEvent , None ]:
397365 """Stream conversation with the OpenAI model.
@@ -411,18 +379,7 @@ async def stream(
411379 ModelThrottledException: If the request is throttled by OpenAI (rate limits).
412380 """
413381 logger .debug ("formatting request" )
414- # TODO This logic si wrong
415- # Use system_prompt_content if provided, otherwise fall back to system_prompt
416- if system_prompt_content :
417- # Extract text from first block if it's a simple text block
418- if len (system_prompt_content ) == 1 and "text" in system_prompt_content [0 ]:
419- system_prompt_str = system_prompt_content [0 ]["text" ]
420- else :
421- system_prompt_str = None # OpenAI doesn't support complex system content blocks
422- else :
423- system_prompt_str = system_prompt
424-
425- request = self .format_request (messages , tool_specs , system_prompt_str )
382+ request = self .format_request (messages , tool_specs , system_prompt , tool_choice )
426383 logger .debug ("formatted request=<%s>" , request )
427384
428385 logger .debug ("invoking model" )
@@ -501,13 +458,7 @@ async def stream(
501458
502459 @override
503460 async def structured_output (
504- self ,
505- output_model : Type [T ],
506- prompt : Messages ,
507- system_prompt : Optional [str ] = None ,
508- * ,
509- system_prompt_content : Optional [list [SystemContentBlock ]] = None ,
510- ** kwargs : Any ,
461+ self , output_model : Type [T ], prompt : Messages , system_prompt : Optional [str ] = None , ** kwargs : Any
511462 ) -> AsyncGenerator [dict [str , Union [T , Any ]], None ]:
512463 """Get structured output from the model.
513464