@@ -3,9 +3,9 @@
 import json
 import time
 from collections.abc import AsyncIterator
-from typing import TYPE_CHECKING, Any, Literal, overload
+from typing import TYPE_CHECKING, Any, Literal, cast, overload
 
-from openai import NOT_GIVEN, AsyncOpenAI, AsyncStream
+from openai import AsyncOpenAI, AsyncStream, Omit, omit
 from openai.types import ChatModel
 from openai.types.chat import ChatCompletion, ChatCompletionChunk, ChatCompletionMessage
 from openai.types.chat.chat_completion import Choice
@@ -44,8 +44,8 @@ def __init__(
         self.model = model
         self._client = openai_client
 
-    def _non_null_or_not_given(self, value: Any) -> Any:
-        return value if value is not None else NOT_GIVEN
+    def _non_null_or_omit(self, value: Any) -> Any:
+        return value if value is not None else omit
 
     async def get_response(
         self,
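The renamed helper swaps the `NOT_GIVEN` sentinel for `omit`. A minimal standalone sketch of the sentinel's behavior, assuming an openai-python release that exports `omit` at the top level as this diff does (the free function and example values are illustrative only):

from typing import Any

from openai import omit

def non_null_or_omit(value: Any) -> Any:
    # `omit` drops the field from the serialized request entirely;
    # passing None instead would send an explicit JSON null.
    return value if value is not None else omit

print(non_null_or_omit(0.7))   # 0.7 -> sent as given
print(non_null_or_omit(None))  # omit -> field left out of the payload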
@@ -243,13 +243,12 @@ async def _fetch_response(
         if tracing.include_data():
             span.span_data.input = converted_messages
 
-        parallel_tool_calls = (
-            True
-            if model_settings.parallel_tool_calls and tools and len(tools) > 0
-            else False
-            if model_settings.parallel_tool_calls is False
-            else NOT_GIVEN
-        )
+        if model_settings.parallel_tool_calls and tools:
+            parallel_tool_calls: bool | Omit = True
+        elif model_settings.parallel_tool_calls is False:
+            parallel_tool_calls = False
+        else:
+            parallel_tool_calls = omit
         tool_choice = Converter.convert_tool_choice(model_settings.tool_choice)
         response_format = Converter.convert_response_format(output_schema)
 
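The nested ternary above is unrolled into an explicit if/elif/else, dropping the redundant `len(tools) > 0` check (a non-empty list is already truthy); annotating the first assignment as `bool | Omit` fixes the union type for every branch. A standalone sketch of the same three-way mapping (the function name and parameters are hypothetical):

from openai import Omit, omit

def resolve_parallel_tool_calls(setting: bool | None, has_tools: bool) -> bool | Omit:
    if setting and has_tools:
        return True  # explicitly enabled and there are tools to run in parallel
    elif setting is False:
        return False  # explicitly disabled by the caller
    return omit  # unset: leave the field out of the request

print(resolve_parallel_tool_calls(None, True))  # omit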
@@ -259,6 +258,7 @@ async def _fetch_response(
             converted_tools.append(Converter.convert_handoff_tool(handoff))
 
         converted_tools = _to_dump_compatible(converted_tools)
+        tools_param = converted_tools if converted_tools else omit
 
         if _debug.DONT_LOG_MODEL_DATA:
             logger.debug("Calling LLM")
@@ -288,28 +288,30 @@ async def _fetch_response(
             self._get_client(), model_settings, stream=stream
         )
 
+        stream_param: Literal[True] | Omit = True if stream else omit
+
         ret = await self._get_client().chat.completions.create(
             model=self.model,
             messages=converted_messages,
-            tools=converted_tools or NOT_GIVEN,
-            temperature=self._non_null_or_not_given(model_settings.temperature),
-            top_p=self._non_null_or_not_given(model_settings.top_p),
-            frequency_penalty=self._non_null_or_not_given(model_settings.frequency_penalty),
-            presence_penalty=self._non_null_or_not_given(model_settings.presence_penalty),
-            max_tokens=self._non_null_or_not_given(model_settings.max_tokens),
+            tools=tools_param,
+            temperature=self._non_null_or_omit(model_settings.temperature),
+            top_p=self._non_null_or_omit(model_settings.top_p),
+            frequency_penalty=self._non_null_or_omit(model_settings.frequency_penalty),
+            presence_penalty=self._non_null_or_omit(model_settings.presence_penalty),
+            max_tokens=self._non_null_or_omit(model_settings.max_tokens),
             tool_choice=tool_choice,
             response_format=response_format,
             parallel_tool_calls=parallel_tool_calls,
-            stream=stream,
-            stream_options=self._non_null_or_not_given(stream_options),
-            store=self._non_null_or_not_given(store),
-            reasoning_effort=self._non_null_or_not_given(reasoning_effort),
-            verbosity=self._non_null_or_not_given(model_settings.verbosity),
-            top_logprobs=self._non_null_or_not_given(model_settings.top_logprobs),
+            stream=cast(Any, stream_param),
+            stream_options=self._non_null_or_omit(stream_options),
+            store=self._non_null_or_omit(store),
+            reasoning_effort=self._non_null_or_omit(reasoning_effort),
+            verbosity=self._non_null_or_omit(model_settings.verbosity),
+            top_logprobs=self._non_null_or_omit(model_settings.top_logprobs),
             extra_headers=self._merge_headers(model_settings),
             extra_query=model_settings.extra_query,
             extra_body=model_settings.extra_body,
-            metadata=self._non_null_or_not_given(model_settings.metadata),
+            metadata=self._non_null_or_omit(model_settings.metadata),
             **(model_settings.extra_args or {}),
         )
 
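`chat.completions.create()` is overloaded on `stream`: roughly, `stream=True` returns an `AsyncStream[ChatCompletionChunk]`, while a false or omitted `stream` returns a `ChatCompletion`. The union `Literal[True] | Omit` matches neither overload by itself, which is why `stream_param` goes through `cast(Any, ...)` at the single call site. A condensed sketch of the pattern (the kwargs dict stands in for the real call; the overload shapes are paraphrased, not copied from the SDK stubs):

from typing import Any, Literal, cast

from openai import Omit, omit

stream = False  # hypothetical flag mirroring the function parameter
stream_param: Literal[True] | Omit = True if stream else omit

# Without the cast, a type checker rejects the argument because each
# overload accepts Literal[True] or an omitted value, never the union.
kwargs: dict[str, Any] = {"stream": cast(Any, stream_param)}
print(kwargs)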
@@ -319,14 +321,13 @@ async def _fetch_response(
         responses_tool_choice = OpenAIResponsesConverter.convert_tool_choice(
             model_settings.tool_choice
         )
-        if responses_tool_choice is None or responses_tool_choice == NOT_GIVEN:
+        if responses_tool_choice is None or responses_tool_choice is omit:
             # For Responses API data compatibility with Chat Completions patterns,
             # we need to set "none" if tool_choice is absent.
             # Without this fix, you'll get the following error:
             # pydantic_core._pydantic_core.ValidationError: 4 validation errors for Response
             # tool_choice.literal['none','auto','required']
             # Input should be 'none', 'auto' or 'required'
-            # [type=literal_error, input_value=NOT_GIVEN, input_type=NotGiven]
             # see also: https://github.com/openai/openai-agents-python/issues/980
             responses_tool_choice = "auto"
 
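Two details in the fallback above: the check moves from `== NOT_GIVEN` to an identity comparison, which works because the SDK exports `omit` as a single module-level instance, and the Responses API requires an explicit tool_choice literal, so an absent value is normalized to "auto". A condensed sketch (the converter result shown is a stand-in):

from typing import Any

from openai import omit

responses_tool_choice: Any = omit  # stand-in for a converter that returned nothing
if responses_tool_choice is None or responses_tool_choice is omit:
    # Response validation only accepts 'none', 'auto' or 'required',
    # so an absent tool_choice is normalized here.
    responses_tool_choice = "auto"

assert responses_tool_choice == "auto"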