Commit 9407743

Migrate openai from 1.x to 2.2.0 (#1874)
This pull request resolves #1867. The issue is a blocker for developers trying the ChatKit Python server SDK, so we should release a new version including this bump as early as possible. I've confirmed that migrating to the 2.x major version does not introduce any incompatibility issues with the examples in this repo.
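
For orientation, the change that recurs throughout this diff is the sentinel rename in openai 2.x: parameters that 1.x code defaulted with `NOT_GIVEN` (type `NotGiven`) now use `omit` (type `Omit`). A minimal sketch of the import-level change, assuming openai>=2.2 is installed:

```python
# Minimal sketch of the 1.x -> 2.x sentinel rename (assumes openai>=2.2).
# 1.x code imported the sentinel pair like this:
#   from openai import NOT_GIVEN, NotGiven
# 2.x code imports the replacement pair:
from openai import Omit, omit

# `omit` is the sentinel value and `Omit` is its type, so it can appear in
# annotations such as `str | Omit` and be tested by identity.
print(isinstance(omit, Omit))  # True
```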
1 parent 0f9c9e3 commit 9407743

12 files changed (+102, -95 lines)

.gitignore

Lines changed: 1 addition & 0 deletions
```diff
@@ -103,6 +103,7 @@ celerybeat.pid
 .python-version
 .env*
 .venv
+.venv*
 env/
 venv/
 ENV/
```

pyproject.toml

Lines changed: 1 addition & 1 deletion
```diff
@@ -7,7 +7,7 @@ requires-python = ">=3.9"
 license = "MIT"
 authors = [{ name = "OpenAI", email = "[email protected]" }]
 dependencies = [
-    "openai>=1.107.1,<2",
+    "openai>=2.2,<3",
     "pydantic>=2.10, <3",
     "griffe>=1.5.6, <2",
     "typing-extensions>=4.12.2, <5",
```

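If a downstream project wants to fail fast on an old install rather than hit an `ImportError` later, a hedged sketch (not part of this PR) that checks the installed major version before relying on 2.x-only exports:

```python
# Hypothetical guard, not from this repo: verify the installed openai major
# version before importing 2.x-only names such as `omit`.
import openai

major = int(openai.__version__.split(".")[0])
if major < 2:
    raise RuntimeError(
        f"openai {openai.__version__} installed; this package requires >=2.2,<3"
    )

from openai import omit  # noqa: E402  # safe once the guard above passes
```
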
src/agents/extensions/models/litellm_model.py

Lines changed: 2 additions & 2 deletions
```diff
@@ -18,7 +18,7 @@
     "dependency group: `pip install 'openai-agents[litellm]'`."
 ) from _e
 
-from openai import NOT_GIVEN, AsyncStream, NotGiven
+from openai import AsyncStream, NotGiven, omit
 from openai.types.chat import (
     ChatCompletionChunk,
     ChatCompletionMessageCustomToolCall,
@@ -374,7 +374,7 @@ async def _fetch_response(
             object="response",
             output=[],
             tool_choice=cast(Literal["auto", "required", "none"], tool_choice)
-            if tool_choice != NOT_GIVEN
+            if tool_choice is not omit
             else "auto",
             top_p=model_settings.top_p,
             temperature=model_settings.temperature,
```
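
Note that the comparison also changes from equality (`!= NOT_GIVEN`) to identity (`is not omit`): `omit` is a single module-level sentinel object, so `is`/`is not` is the idiomatic test. An illustrative sketch of the same check in isolation, assuming openai>=2.2:

```python
# Illustrative sketch (assumes openai>=2.2): compare against the sentinel by
# identity, mirroring the `is not omit` check in the hunk above.
from openai import Omit, omit

def resolve_tool_choice(tool_choice: "str | Omit") -> str:
    # Fall back to "auto" only when the caller left the parameter unset.
    return tool_choice if tool_choice is not omit else "auto"

print(resolve_tool_choice(omit))        # -> auto
print(resolve_tool_choice("required"))  # -> required
```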

src/agents/models/chatcmpl_converter.py

Lines changed: 10 additions & 7 deletions
```diff
@@ -2,9 +2,9 @@
 
 import json
 from collections.abc import Iterable
-from typing import Any, Literal, cast
+from typing import Any, Literal, Union, cast
 
-from openai import NOT_GIVEN, NotGiven
+from openai import Omit, omit
 from openai.types.chat import (
     ChatCompletionAssistantMessageParam,
     ChatCompletionContentPartImageParam,
@@ -54,9 +54,9 @@ class Converter:
     @classmethod
     def convert_tool_choice(
         cls, tool_choice: Literal["auto", "required", "none"] | str | MCPToolChoice | None
-    ) -> ChatCompletionToolChoiceOptionParam | NotGiven:
+    ) -> ChatCompletionToolChoiceOptionParam | Omit:
         if tool_choice is None:
-            return NOT_GIVEN
+            return omit
         elif isinstance(tool_choice, MCPToolChoice):
             raise UserError("MCPToolChoice is not supported for Chat Completions models")
         elif tool_choice == "auto":
@@ -76,9 +76,9 @@ def convert_tool_choice(
     @classmethod
     def convert_response_format(
         cls, final_output_schema: AgentOutputSchemaBase | None
-    ) -> ResponseFormat | NotGiven:
+    ) -> ResponseFormat | Omit:
         if not final_output_schema or final_output_schema.is_plain_text():
-            return NOT_GIVEN
+            return omit
 
         return {
             "type": "json_schema",
@@ -506,10 +506,13 @@ def ensure_assistant_message() -> ChatCompletionAssistantMessageParam:
             # 5) function call output => tool message
             elif func_output := cls.maybe_function_tool_call_output(item):
                 flush_assistant_message()
+                output_content = cast(
+                    Union[str, Iterable[ResponseInputContentParam]], func_output["output"]
+                )
                 msg: ChatCompletionToolMessageParam = {
                     "role": "tool",
                     "tool_call_id": func_output["call_id"],
-                    "content": func_output["output"],
+                    "content": cls.extract_text_content(output_content),
                 }
                 result.append(msg)
 
```

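The last hunk also changes behavior slightly: tool outputs are now flattened to text via the converter's `extract_text_content` helper instead of being passed through verbatim. A simplified analogue of that flattening (the helper below is hypothetical, not the repo's implementation):

```python
# Hypothetical, simplified analogue of flattening a tool output that may be
# either a plain string or structured content parts into message text.
from collections.abc import Iterable
from typing import Union

def flatten_tool_output(output: Union[str, Iterable[dict]]) -> str:
    if isinstance(output, str):
        return output
    # Keep text parts only; non-text parts (e.g. images) carry no "text" key.
    return "".join(part.get("text", "") for part in output if isinstance(part, dict))

print(flatten_tool_output("plain result"))
print(flatten_tool_output([{"type": "input_text", "text": "structured result"}]))
```
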
src/agents/models/openai_chatcompletions.py

Lines changed: 27 additions & 26 deletions
```diff
@@ -3,9 +3,9 @@
 import json
 import time
 from collections.abc import AsyncIterator
-from typing import TYPE_CHECKING, Any, Literal, overload
+from typing import TYPE_CHECKING, Any, Literal, cast, overload
 
-from openai import NOT_GIVEN, AsyncOpenAI, AsyncStream
+from openai import AsyncOpenAI, AsyncStream, Omit, omit
 from openai.types import ChatModel
 from openai.types.chat import ChatCompletion, ChatCompletionChunk, ChatCompletionMessage
 from openai.types.chat.chat_completion import Choice
@@ -44,8 +44,8 @@ def __init__(
         self.model = model
         self._client = openai_client
 
-    def _non_null_or_not_given(self, value: Any) -> Any:
-        return value if value is not None else NOT_GIVEN
+    def _non_null_or_omit(self, value: Any) -> Any:
+        return value if value is not None else omit
 
     async def get_response(
         self,
@@ -243,13 +243,12 @@ async def _fetch_response(
             if tracing.include_data():
                 span.span_data.input = converted_messages
 
-        parallel_tool_calls = (
-            True
-            if model_settings.parallel_tool_calls and tools and len(tools) > 0
-            else False
-            if model_settings.parallel_tool_calls is False
-            else NOT_GIVEN
-        )
+        if model_settings.parallel_tool_calls and tools:
+            parallel_tool_calls: bool | Omit = True
+        elif model_settings.parallel_tool_calls is False:
+            parallel_tool_calls = False
+        else:
+            parallel_tool_calls = omit
         tool_choice = Converter.convert_tool_choice(model_settings.tool_choice)
         response_format = Converter.convert_response_format(output_schema)
 
@@ -259,6 +258,7 @@ async def _fetch_response(
             converted_tools.append(Converter.convert_handoff_tool(handoff))
 
         converted_tools = _to_dump_compatible(converted_tools)
+        tools_param = converted_tools if converted_tools else omit
 
         if _debug.DONT_LOG_MODEL_DATA:
             logger.debug("Calling LLM")
@@ -288,28 +288,30 @@ async def _fetch_response(
            self._get_client(), model_settings, stream=stream
        )
 
+        stream_param: Literal[True] | Omit = True if stream else omit
+
         ret = await self._get_client().chat.completions.create(
             model=self.model,
             messages=converted_messages,
-            tools=converted_tools or NOT_GIVEN,
-            temperature=self._non_null_or_not_given(model_settings.temperature),
-            top_p=self._non_null_or_not_given(model_settings.top_p),
-            frequency_penalty=self._non_null_or_not_given(model_settings.frequency_penalty),
-            presence_penalty=self._non_null_or_not_given(model_settings.presence_penalty),
-            max_tokens=self._non_null_or_not_given(model_settings.max_tokens),
+            tools=tools_param,
+            temperature=self._non_null_or_omit(model_settings.temperature),
+            top_p=self._non_null_or_omit(model_settings.top_p),
+            frequency_penalty=self._non_null_or_omit(model_settings.frequency_penalty),
+            presence_penalty=self._non_null_or_omit(model_settings.presence_penalty),
+            max_tokens=self._non_null_or_omit(model_settings.max_tokens),
             tool_choice=tool_choice,
             response_format=response_format,
             parallel_tool_calls=parallel_tool_calls,
-            stream=stream,
-            stream_options=self._non_null_or_not_given(stream_options),
-            store=self._non_null_or_not_given(store),
-            reasoning_effort=self._non_null_or_not_given(reasoning_effort),
-            verbosity=self._non_null_or_not_given(model_settings.verbosity),
-            top_logprobs=self._non_null_or_not_given(model_settings.top_logprobs),
+            stream=cast(Any, stream_param),
+            stream_options=self._non_null_or_omit(stream_options),
+            store=self._non_null_or_omit(store),
+            reasoning_effort=self._non_null_or_omit(reasoning_effort),
+            verbosity=self._non_null_or_omit(model_settings.verbosity),
+            top_logprobs=self._non_null_or_omit(model_settings.top_logprobs),
             extra_headers=self._merge_headers(model_settings),
             extra_query=model_settings.extra_query,
             extra_body=model_settings.extra_body,
-            metadata=self._non_null_or_not_given(model_settings.metadata),
+            metadata=self._non_null_or_omit(model_settings.metadata),
             **(model_settings.extra_args or {}),
         )
 
@@ -319,14 +321,13 @@ async def _fetch_response(
         responses_tool_choice = OpenAIResponsesConverter.convert_tool_choice(
             model_settings.tool_choice
         )
-        if responses_tool_choice is None or responses_tool_choice == NOT_GIVEN:
+        if responses_tool_choice is None or responses_tool_choice is omit:
            # For Responses API data compatibility with Chat Completions patterns,
            # we need to set "none" if tool_choice is absent.
            # Without this fix, you'll get the following error:
            # pydantic_core._pydantic_core.ValidationError: 4 validation errors for Response
            # tool_choice.literal['none','auto','required']
            # Input should be 'none', 'auto' or 'required'
-           # [type=literal_error, input_value=NOT_GIVEN, input_type=NotGiven]
            # see also: https://github.com/openai/openai-agents-python/issues/980
            responses_tool_choice = "auto"
 
```
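The renamed `_non_null_or_omit` helper captures the core convention: `None` in `ModelSettings` means "not configured," and mapping it to `omit` makes the SDK drop the field from the request body rather than sending an explicit `null`. A minimal standalone sketch, assuming openai>=2.2:

```python
# Minimal sketch of the None -> omit convention (assumes openai>=2.2).
from typing import Any

from openai import omit

def non_null_or_omit(value: Any) -> Any:
    # None means "not configured": hand the SDK its omit sentinel so the
    # field is left out of the serialized request entirely.
    return value if value is not None else omit

print(non_null_or_omit(0.7))            # 0.7 -> sent as-is
print(non_null_or_omit(None) is omit)   # True -> field omitted
```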

src/agents/models/openai_responses.py

Lines changed: 31 additions & 29 deletions
```diff
@@ -4,9 +4,9 @@
 from collections.abc import AsyncIterator
 from contextvars import ContextVar
 from dataclasses import dataclass
-from typing import TYPE_CHECKING, Any, Literal, cast, overload
+from typing import TYPE_CHECKING, Any, Literal, Union, cast, overload
 
-from openai import NOT_GIVEN, APIStatusError, AsyncOpenAI, AsyncStream, NotGiven
+from openai import APIStatusError, AsyncOpenAI, AsyncStream, Omit, omit
 from openai.types import ChatModel
 from openai.types.responses import (
     Response,
@@ -69,8 +69,8 @@ def __init__(
         self.model = model
         self._client = openai_client
 
-    def _non_null_or_not_given(self, value: Any) -> Any:
-        return value if value is not None else NOT_GIVEN
+    def _non_null_or_omit(self, value: Any) -> Any:
+        return value if value is not None else omit
 
     async def get_response(
         self,
@@ -249,13 +249,12 @@ async def _fetch_response(
         list_input = ItemHelpers.input_to_new_input_list(input)
         list_input = _to_dump_compatible(list_input)
 
-        parallel_tool_calls = (
-            True
-            if model_settings.parallel_tool_calls and tools and len(tools) > 0
-            else False
-            if model_settings.parallel_tool_calls is False
-            else NOT_GIVEN
-        )
+        if model_settings.parallel_tool_calls and tools:
+            parallel_tool_calls: bool | Omit = True
+        elif model_settings.parallel_tool_calls is False:
+            parallel_tool_calls = False
+        else:
+            parallel_tool_calls = omit
 
         tool_choice = Converter.convert_tool_choice(model_settings.tool_choice)
         converted_tools = Converter.convert_tools(tools, handoffs)
@@ -297,36 +296,39 @@ async def _fetch_response(
         if model_settings.top_logprobs is not None:
             extra_args["top_logprobs"] = model_settings.top_logprobs
         if model_settings.verbosity is not None:
-            if response_format != NOT_GIVEN:
+            if response_format is not omit:
                 response_format["verbosity"] = model_settings.verbosity  # type: ignore [index]
             else:
                 response_format = {"verbosity": model_settings.verbosity}
 
-        return await self._client.responses.create(
-            previous_response_id=self._non_null_or_not_given(previous_response_id),
-            conversation=self._non_null_or_not_given(conversation_id),
-            instructions=self._non_null_or_not_given(system_instructions),
+        stream_param: Literal[True] | Omit = True if stream else omit
+
+        response = await self._client.responses.create(
+            previous_response_id=self._non_null_or_omit(previous_response_id),
+            conversation=self._non_null_or_omit(conversation_id),
+            instructions=self._non_null_or_omit(system_instructions),
             model=self.model,
             input=list_input,
             include=include,
             tools=converted_tools_payload,
-            prompt=self._non_null_or_not_given(prompt),
-            temperature=self._non_null_or_not_given(model_settings.temperature),
-            top_p=self._non_null_or_not_given(model_settings.top_p),
-            truncation=self._non_null_or_not_given(model_settings.truncation),
-            max_output_tokens=self._non_null_or_not_given(model_settings.max_tokens),
+            prompt=self._non_null_or_omit(prompt),
+            temperature=self._non_null_or_omit(model_settings.temperature),
+            top_p=self._non_null_or_omit(model_settings.top_p),
+            truncation=self._non_null_or_omit(model_settings.truncation),
+            max_output_tokens=self._non_null_or_omit(model_settings.max_tokens),
             tool_choice=tool_choice,
             parallel_tool_calls=parallel_tool_calls,
-            stream=stream,
+            stream=cast(Any, stream_param),
             extra_headers=self._merge_headers(model_settings),
             extra_query=model_settings.extra_query,
             extra_body=model_settings.extra_body,
             text=response_format,
-            store=self._non_null_or_not_given(model_settings.store),
-            reasoning=self._non_null_or_not_given(model_settings.reasoning),
-            metadata=self._non_null_or_not_given(model_settings.metadata),
+            store=self._non_null_or_omit(model_settings.store),
+            reasoning=self._non_null_or_omit(model_settings.reasoning),
+            metadata=self._non_null_or_omit(model_settings.metadata),
             **extra_args,
         )
+        return cast(Union[Response, AsyncStream[ResponseStreamEvent]], response)
 
     def _get_client(self) -> AsyncOpenAI:
         if self._client is None:
@@ -351,9 +353,9 @@ class Converter:
     @classmethod
     def convert_tool_choice(
         cls, tool_choice: Literal["auto", "required", "none"] | str | MCPToolChoice | None
-    ) -> response_create_params.ToolChoice | NotGiven:
+    ) -> response_create_params.ToolChoice | Omit:
         if tool_choice is None:
-            return NOT_GIVEN
+            return omit
         elif isinstance(tool_choice, MCPToolChoice):
             return {
                 "server_label": tool_choice.server_label,
@@ -404,9 +406,9 @@ def convert_tool_choice(
     @classmethod
     def get_response_format(
         cls, output_schema: AgentOutputSchemaBase | None
-    ) -> ResponseTextConfigParam | NotGiven:
+    ) -> ResponseTextConfigParam | Omit:
         if output_schema is None or output_schema.is_plain_text():
-            return NOT_GIVEN
+            return omit
         else:
             return {
                 "format": {
```

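The `stream_param` dance here (and in the Chat Completions model above) is a typing workaround: `responses.create` is overloaded on `stream`, so the call site builds a `Literal[True] | Omit` value and casts it to `Any` to satisfy both overloads from a single code path. A sketch of the pattern, assuming openai>=2.2:

```python
# Sketch of the stream/overload workaround (assumes openai>=2.2): build the
# argument as Literal[True] | Omit, then cast for the overloaded call site.
from typing import Any, Literal, Union, cast

from openai import Omit, omit

def build_stream_param(stream: bool) -> Union[Literal[True], Omit]:
    # True selects the streaming overload; omit drops the field so the
    # non-streaming overload applies.
    return True if stream else omit

print(build_stream_param(True))           # True
print(build_stream_param(False) is omit)  # True

stream_arg = cast(Any, build_stream_param(False))  # type checker accepts either overload
```
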
tests/fake_model.py

Lines changed: 1 addition & 0 deletions
```diff
@@ -253,6 +253,7 @@ async def stream_response(
                     item_id=output_item.call_id,
                     output_index=output_index,
                     arguments=output_item.arguments,
+                    name=output_item.name,
                     sequence_number=sequence_number,
                 )
                 sequence_number += 1
```

tests/test_model_payload_iterators.py

Lines changed: 2 additions & 2 deletions
```diff
@@ -5,7 +5,7 @@
 
 import httpx
 import pytest
-from openai import NOT_GIVEN
+from openai import omit
 from openai.types.chat.chat_completion import ChatCompletion
 from openai.types.responses import ToolParam
 
@@ -82,7 +82,7 @@ class DummyCompletions:
     async def create(self, **kwargs):
         captured_kwargs.update(kwargs)
         _force_materialization(kwargs["messages"])
-        if kwargs["tools"] is not NOT_GIVEN:
+        if kwargs["tools"] is not omit:
             _force_materialization(kwargs["tools"])
         return ChatCompletion(
             id="dummy-id",
```

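One practical consequence for test doubles: the sentinel is not an iterable payload, so helpers that walk captured kwargs must skip it, as the guard above does. A hedged sketch of that guard in isolation (the materialization helper below is hypothetical, not the repo's `_force_materialization`):

```python
# Hypothetical sketch: skip the omit sentinel before walking a captured
# kwarg, since omit is a sentinel object rather than an iterable payload.
from openai import omit

def materialize(value):
    if value is omit:
        return value  # leave the sentinel untouched
    if isinstance(value, (list, tuple)):
        return [materialize(item) for item in value]
    return value

print(materialize(omit) is omit)   # True
print(materialize([("a",), "b"]))  # [['a'], 'b']
```
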
tests/test_openai_chatcompletions.py

Lines changed: 10 additions & 10 deletions
```diff
@@ -5,7 +5,7 @@
 
 import httpx
 import pytest
-from openai import NOT_GIVEN, AsyncOpenAI
+from openai import AsyncOpenAI, omit
 from openai.types.chat.chat_completion import ChatCompletion, Choice
 from openai.types.chat.chat_completion_chunk import ChatCompletionChunk
 from openai.types.chat.chat_completion_message import ChatCompletionMessage
@@ -285,17 +285,17 @@ def __init__(self, completions: DummyCompletions) -> None:
     assert result is chat
     # Ensure expected args were passed through to OpenAI client.
     kwargs = completions.kwargs
-    assert kwargs["stream"] is False
-    assert kwargs["store"] is NOT_GIVEN
+    assert kwargs["stream"] is omit
+    assert kwargs["store"] is omit
     assert kwargs["model"] == "gpt-4"
     assert kwargs["messages"][0]["role"] == "system"
     assert kwargs["messages"][0]["content"] == "sys"
     assert kwargs["messages"][1]["role"] == "user"
-    # Defaults for optional fields become the NOT_GIVEN sentinel
-    assert kwargs["tools"] is NOT_GIVEN
-    assert kwargs["tool_choice"] is NOT_GIVEN
-    assert kwargs["response_format"] is NOT_GIVEN
-    assert kwargs["stream_options"] is NOT_GIVEN
+    # Defaults for optional fields become the omit sentinel
+    assert kwargs["tools"] is omit
+    assert kwargs["tool_choice"] is omit
+    assert kwargs["response_format"] is omit
+    assert kwargs["stream_options"] is omit
 
 
 @pytest.mark.asyncio
@@ -340,8 +340,8 @@ def __init__(self, completions: DummyCompletions) -> None:
     )
     # Check OpenAI client was called for streaming
     assert completions.kwargs["stream"] is True
-    assert completions.kwargs["store"] is NOT_GIVEN
-    assert completions.kwargs["stream_options"] is NOT_GIVEN
+    assert completions.kwargs["store"] is omit
+    assert completions.kwargs["stream_options"] is omit
     # Response is a proper openai Response
     assert isinstance(response, Response)
     assert response.id == FAKE_RESPONSES_ID
```
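
The first assertion block records a behavioral shift, not just a rename: under the new `stream_param` pattern, a non-streaming call omits `stream` entirely, so the test now checks `kwargs["stream"] is omit` where the 1.x-era code asserted `is False`. A small sketch of that difference, assuming openai>=2.2:

```python
# Sketch (assumes openai>=2.2): non-streaming calls now omit `stream`
# instead of passing stream=False.
from typing import Literal, Union

from openai import Omit, omit

def stream_arg(stream: bool) -> Union[Literal[True], Omit]:
    return True if stream else omit

assert stream_arg(True) is True   # streaming: explicit True
assert stream_arg(False) is omit  # non-streaming: field omitted (was False in 1.x)
```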
