diff --git a/libs/partners/openai/tests/integration_tests/chat_models/test_base.py b/libs/partners/openai/tests/integration_tests/chat_models/test_base.py
index 02475b5a8be40..26a8bb787a207 100644
--- a/libs/partners/openai/tests/integration_tests/chat_models/test_base.py
+++ b/libs/partners/openai/tests/integration_tests/chat_models/test_base.py
@@ -9,7 +9,6 @@
 from typing import Any, Literal, cast
 
 import httpx
-import openai
 import pytest
 from langchain_core.callbacks import CallbackManager
 from langchain_core.messages import (
@@ -23,11 +22,7 @@
     ToolMessage,
 )
 from langchain_core.outputs import ChatGeneration, ChatResult, LLMResult
-from langchain_tests.integration_tests.chat_models import (
-    _validate_tool_call_message,
-    magic_function,
-)
-from pydantic import BaseModel, Field, field_validator
+from pydantic import BaseModel, field_validator
 from typing_extensions import TypedDict
 
 from langchain_openai import ChatOpenAI
@@ -772,49 +767,6 @@ def test_image_token_counting_png() -> None:
     assert expected == actual
 
 
-@pytest.mark.parametrize("use_responses_api", [False, True])
-def test_tool_calling_strict(use_responses_api: bool) -> None:
-    """Test tool calling with strict=True.
-
-    Responses API appears to have fewer constraints on schema when strict=True.
-    """
-
-    class magic_function_notrequired_arg(BaseModel):  # noqa: N801
-        """Applies a magic function to an input."""
-
-        input: int | None = Field(default=None)
-
-    model = ChatOpenAI(
-        model="gpt-5-nano", temperature=0, use_responses_api=use_responses_api
-    )
-    # N.B. magic_function adds metadata to schema (min/max for number fields)
-    model_with_tools = model.bind_tools([magic_function], strict=True)
-    # Having a not-required argument in the schema remains invalid.
-    model_with_invalid_tool_schema = model.bind_tools(
-        [magic_function_notrequired_arg], strict=True
-    )
-
-    # Test invoke
-    query = "What is the value of magic_function(3)? Use the tool."
-    response = model_with_tools.invoke(query)
-    _validate_tool_call_message(response)
-
-    # Test invalid tool schema
-    with pytest.raises(openai.BadRequestError):
-        model_with_invalid_tool_schema.invoke(query)
-
-    # Test stream
-    full: BaseMessageChunk | None = None
-    for chunk in model_with_tools.stream(query):
-        full = chunk if full is None else full + chunk  # type: ignore
-    assert isinstance(full, AIMessage)
-    _validate_tool_call_message(full)
-
-    # Test invalid tool schema
-    with pytest.raises(openai.BadRequestError):
-        next(model_with_invalid_tool_schema.stream(query))
-
-
 @pytest.mark.parametrize("use_responses_api", [False, True])
 @pytest.mark.parametrize(
     ("model", "method"),