diff --git a/src/mcp_agent/llm/providers/augmented_llm_openai.py b/src/mcp_agent/llm/providers/augmented_llm_openai.py
index 42c728f70..1e876184c 100644
--- a/src/mcp_agent/llm/providers/augmented_llm_openai.py
+++ b/src/mcp_agent/llm/providers/augmented_llm_openai.py
@@ -78,7 +78,7 @@ def __init__(self, provider: Provider = Provider.OPENAI, *args, **kwargs) -> Non
         self._reasoning_effort = self.context.config.openai.reasoning_effort
 
         # Determine if we're using a reasoning model
-        # TODO -- move this to model capabiltities, add o4.
+        # TODO -- move this to model capabilities, add o4.
         chosen_model = self.default_request_params.model if self.default_request_params else None
         self._reasoning = chosen_model and (
             chosen_model.startswith("o3") or chosen_model.startswith("o1")
@@ -325,7 +325,7 @@ async def post_tool_call(
         return result
 
     def _prepare_api_request(
-        self, messages, tools, request_params: RequestParams
+        self, messages, tools: List[ChatCompletionToolParam] | None, request_params: RequestParams
     ) -> dict[str, str]:
 
         # Create base arguments dictionary
@@ -345,9 +345,8 @@ def _prepare_api_request(
             )
         else:
             base_args["max_tokens"] = request_params.maxTokens
-
-        if tools:
-            base_args["parallel_tool_calls"] = request_params.parallel_tool_calls
+            if tools:
+                base_args["parallel_tool_calls"] = request_params.parallel_tool_calls
 
         arguments: Dict[str, str] = self.prepare_provider_arguments(
             base_args, request_params, self.OPENAI_EXCLUDE_FIELDS.union(self.BASE_EXCLUDE_FIELDS)
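
For context on the last hunk, here is a minimal standalone sketch of the resulting argument-building logic. It assumes the intent is that `parallel_tool_calls` is attached only on the non-reasoning path (o1/o3-style models do not accept parallel function calling) and only when a tool list is actually supplied; `build_openai_args` is a hypothetical helper written for illustration, not the repository's API.

```python
from typing import Any, Dict, List, Optional


def build_openai_args(
    model: str,
    messages: List[Dict[str, Any]],
    tools: Optional[List[Dict[str, Any]]] = None,
    max_tokens: int = 2048,
    parallel_tool_calls: bool = True,
) -> Dict[str, Any]:
    """Sketch of the guard in _prepare_api_request: reasoning models take
    max_completion_tokens and no parallel_tool_calls; other models take
    max_tokens, plus parallel_tool_calls only when tools are present."""
    reasoning = model.startswith(("o1", "o3"))  # mirrors the TODO'd startswith check
    args: Dict[str, Any] = {"model": model, "messages": messages}
    if tools:
        args["tools"] = tools
    if reasoning:
        args["max_completion_tokens"] = max_tokens
    else:
        args["max_tokens"] = max_tokens
        if tools:
            # Flag is omitted entirely for reasoning models and tool-less calls
            args["parallel_tool_calls"] = parallel_tool_calls
    return args


# Reasoning model: no parallel_tool_calls even when tools are supplied
assert "parallel_tool_calls" not in build_openai_args("o3-mini", [], tools=[{"type": "function"}])
# Non-reasoning model with tools: flag is included
assert build_openai_args("gpt-4o", [], tools=[{"type": "function"}])["parallel_tool_calls"] is True
```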