Skip to content

Commit 07a627e

Browse files
SuveenErm-openai and rm-openai authored
Add reasoning parameter to ModelSettings (#388)
Fixes #189. @rm-openai — would really appreciate it if this could get a quick review. --------- Co-authored-by: Rohan Mehta <[email protected]>
1 parent 0110f3a commit 07a627e

File tree

3 files changed

+12
-0
lines changed

3 files changed

+12
-0
lines changed

src/agents/model_settings.py

+7
Original file line numberDiff line numberDiff line change
@@ -3,6 +3,8 @@
33
from dataclasses import dataclass, fields, replace
44
from typing import Literal
55

6+
from openai.types.shared import Reasoning
7+
68

79
@dataclass
810
class ModelSettings:
@@ -40,6 +42,11 @@ class ModelSettings:
4042
max_tokens: int | None = None
4143
"""The maximum number of output tokens to generate."""
4244

45+
reasoning: Reasoning | None = None
46+
"""Configuration options for
47+
[reasoning models](https://platform.openai.com/docs/guides/reasoning).
48+
"""
49+
4350
metadata: dict[str, str] | None = None
4451
"""Metadata to include with the model response call."""
4552

src/agents/models/openai_chatcompletions.py

+4
Original file line numberDiff line numberDiff line change
@@ -521,6 +521,8 @@ async def _fetch_response(
521521
# Match the behavior of Responses where store is True when not given
522522
store = model_settings.store if model_settings.store is not None else True
523523

524+
reasoning_effort = model_settings.reasoning.effort if model_settings.reasoning else None
525+
524526
ret = await self._get_client().chat.completions.create(
525527
model=self.model,
526528
messages=converted_messages,
@@ -536,6 +538,7 @@ async def _fetch_response(
536538
stream=stream,
537539
stream_options={"include_usage": True} if stream else NOT_GIVEN,
538540
store=store,
541+
reasoning_effort=self._non_null_or_not_given(reasoning_effort),
539542
extra_headers=_HEADERS,
540543
metadata=model_settings.metadata,
541544
)
@@ -556,6 +559,7 @@ async def _fetch_response(
556559
temperature=model_settings.temperature,
557560
tools=[],
558561
parallel_tool_calls=parallel_tool_calls or False,
562+
reasoning=model_settings.reasoning,
559563
)
560564
return response, ret
561565

src/agents/models/openai_responses.py

+1
Original file line numberDiff line numberDiff line change
@@ -247,6 +247,7 @@ async def _fetch_response(
247247
extra_headers=_HEADERS,
248248
text=response_format,
249249
store=self._non_null_or_not_given(model_settings.store),
250+
reasoning=self._non_null_or_not_given(model_settings.reasoning),
250251
metadata=model_settings.metadata,
251252
)
252253

0 commit comments

Comments (0)