From c74b46c991e5963ecbccee7a3a8477bc7db24824 Mon Sep 17 00:00:00 2001
From: cbornet
Date: Thu, 8 Jan 2026 15:42:41 +0100
Subject: [PATCH 1/2] chore(langchain): fix types in test_framework

---
 .../tests/unit_tests/agents/any_str.py        |   4 +-
 .../agents/middleware/core/test_framework.py  | 231 ++++++++++--------
 2 files changed, 126 insertions(+), 109 deletions(-)

diff --git a/libs/langchain_v1/tests/unit_tests/agents/any_str.py b/libs/langchain_v1/tests/unit_tests/agents/any_str.py
index da0dea1a50897..0abf088ca1a1e 100644
--- a/libs/langchain_v1/tests/unit_tests/agents/any_str.py
+++ b/libs/langchain_v1/tests/unit_tests/agents/any_str.py
@@ -4,7 +4,7 @@
 class AnyStr(str):
     __slots__ = ("prefix",)
 
-    def __init__(self, prefix: str | re.Pattern = "") -> None:
+    def __init__(self, prefix: str | re.Pattern[str] = "") -> None:
         super().__init__()
         self.prefix = prefix
 
@@ -12,7 +12,7 @@ def __eq__(self, other: object) -> bool:
         return isinstance(other, str) and (
             other.startswith(self.prefix)
             if isinstance(self.prefix, str)
-            else self.prefix.match(other)
+            else self.prefix.match(other) is not None
         )
 
     def __hash__(self) -> int:
diff --git a/libs/langchain_v1/tests/unit_tests/agents/middleware/core/test_framework.py b/libs/langchain_v1/tests/unit_tests/agents/middleware/core/test_framework.py
index 5c904188b3801..a6d8654c8e987 100644
--- a/libs/langchain_v1/tests/unit_tests/agents/middleware/core/test_framework.py
+++ b/libs/langchain_v1/tests/unit_tests/agents/middleware/core/test_framework.py
@@ -1,6 +1,6 @@
 import sys
 from collections.abc import Awaitable, Callable
-from typing import Annotated, Any
+from typing import Annotated, Any, Generic
 
 import pytest
 from langchain_core.language_models import GenericFakeChatModel
@@ -11,6 +11,7 @@
 from langgraph.runtime import Runtime
 from pydantic import BaseModel, Field
 from syrupy.assertion import SnapshotAssertion
+from typing_extensions import override
 
 from langchain.agents.factory import create_agent
 from langchain.agents.middleware.types import (
@@ -22,6 +23,7 @@
     OmitFromInput,
     OmitFromOutput,
     PrivateStateAttr,
+    ResponseT,
     after_agent,
     after_model,
     before_agent,
@@ -36,38 +38,38 @@
 
 def test_create_agent_invoke(
     snapshot: SnapshotAssertion,
-    sync_checkpointer: BaseCheckpointSaver,
-):
+    sync_checkpointer: BaseCheckpointSaver[str],
+) -> None:
     calls = []
 
     class NoopSeven(AgentMiddleware):
-        def before_model(self, state, runtime):
+        def before_model(self, state: AgentState[Any], runtime: Runtime) -> None:
             calls.append("NoopSeven.before_model")
 
         def wrap_model_call(
             self,
             request: ModelRequest,
-            handler: Callable[[ModelRequest], AIMessage],
-        ) -> AIMessage:
+            handler: Callable[[ModelRequest], ModelResponse],
+        ) -> ModelCallResult:
             calls.append("NoopSeven.wrap_model_call")
             return handler(request)
 
-        def after_model(self, state, runtime):
+        def after_model(self, state: AgentState[Any], runtime: Runtime) -> None:
             calls.append("NoopSeven.after_model")
 
     class NoopEight(AgentMiddleware):
-        def before_model(self, state, runtime):
+        def before_model(self, state: AgentState[Any], runtime: Runtime) -> None:
             calls.append("NoopEight.before_model")
 
         def wrap_model_call(
             self,
             request: ModelRequest,
-            handler: Callable[[ModelRequest], AIMessage],
-        ) -> AIMessage:
+            handler: Callable[[ModelRequest], ModelResponse],
+        ) -> ModelCallResult:
             calls.append("NoopEight.wrap_model_call")
             return handler(request)
 
-        def after_model(self, state, runtime):
+        def after_model(self, state: AgentState[Any], runtime: Runtime) -> None:
             calls.append("NoopEight.after_model")
 
     @tool
@@ -137,40 +139,40 @@ def my_tool(value: str) -> str:
 
 def test_create_agent_jump(
     snapshot: SnapshotAssertion,
-    sync_checkpointer: BaseCheckpointSaver,
-):
+    sync_checkpointer: BaseCheckpointSaver[str],
+) -> None:
     calls = []
 
     class NoopSeven(AgentMiddleware):
-        def before_model(self, state, runtime):
+        def before_model(self, state: AgentState[Any], runtime: Runtime) -> None:
             calls.append("NoopSeven.before_model")
 
         def wrap_model_call(
             self,
             request: ModelRequest,
-            handler: Callable[[ModelRequest], AIMessage],
-        ) -> AIMessage:
+            handler: Callable[[ModelRequest], ModelResponse],
+        ) -> ModelCallResult:
             calls.append("NoopSeven.wrap_model_call")
             return handler(request)
 
-        def after_model(self, state, runtime):
+        def after_model(self, state: AgentState[Any], runtime: Runtime) -> None:
             calls.append("NoopSeven.after_model")
 
     class NoopEight(AgentMiddleware):
         @hook_config(can_jump_to=["end"])
-        def before_model(self, state, runtime) -> dict[str, Any]:
+        def before_model(self, state: AgentState[Any], runtime: Runtime) -> dict[str, Any]:
             calls.append("NoopEight.before_model")
             return {"jump_to": "end"}
 
         def wrap_model_call(
             self,
             request: ModelRequest,
-            handler: Callable[[ModelRequest], AIMessage],
-        ) -> AIMessage:
+            handler: Callable[[ModelRequest], ModelResponse],
+        ) -> ModelCallResult:
             calls.append("NoopEight.wrap_model_call")
             return handler(request)
 
-        def after_model(self, state, runtime):
+        def after_model(self, state: AgentState[Any], runtime: Runtime) -> None:
             calls.append("NoopEight.after_model")
 
     @tool
@@ -222,15 +224,15 @@ def my_tool(input_string: str) -> str:
 
     class NoopZero(AgentMiddleware):
         @hook_config(can_jump_to=["end"])
-        def before_agent(self, state, runtime) -> None:
+        def before_agent(self, state: AgentState[Any], runtime: Runtime) -> None:
            return None
 
     class NoopOne(AgentMiddleware):
-        def after_agent(self, state, runtime) -> None:
+        def after_agent(self, state: AgentState[Any], runtime: Runtime) -> None:
             return None
 
     class NoopTwo(AgentMiddleware):
-        def after_agent(self, state, runtime) -> None:
+        def after_agent(self, state: AgentState[Any], runtime: Runtime) -> None:
             return None
 
     agent_one = create_agent(
@@ -250,8 +252,8 @@ class ModifyMiddleware(AgentMiddleware):
         def wrap_model_call(
             self,
             request: ModelRequest,
-            handler: Callable[[ModelRequest], AIMessage],
-        ) -> AIMessage:
+            handler: Callable[[ModelRequest], ModelResponse],
+        ) -> ModelCallResult:
             request.messages.append(HumanMessage("remember to be nice!"))
             return handler(request)
 
@@ -270,7 +272,7 @@ def wrap_model_call(
     )
 
 
-def test_tools_to_model_edge_with_structured_and_regular_tool_calls():
+def test_tools_to_model_edge_with_structured_and_regular_tool_calls() -> None:
     """Test tools to model edge with structured and regular tool calls.
 
     Test that when there are both structured and regular tool calls, we execute regular
@@ -290,7 +292,7 @@ def regular_tool(query: str) -> str:
 
     # Create a fake model that returns both structured and regular tool calls
     class FakeModelWithBothToolCalls(FakeToolCallingModel):
-        def __init__(self):
+        def __init__(self) -> None:
             super().__init__()
             self.tool_calls = [
                 [
@@ -351,7 +353,7 @@ def __init__(self):
 def test_public_private_state_for_custom_middleware() -> None:
     """Test public and private state for custom middleware."""
 
-    class CustomState(AgentState):
+    class CustomState(AgentState[Any]):
         omit_input: Annotated[str, OmitFromInput]
         omit_output: Annotated[str, OmitFromOutput]
         private_state: Annotated[str, PrivateStateAttr]
@@ -359,7 +361,8 @@ class CustomState(AgentState):
     class CustomMiddleware(AgentMiddleware[CustomState]):
         state_schema: type[CustomState] = CustomState
 
-        def before_model(self, state: CustomState) -> dict[str, Any]:
+        @override
+        def before_model(self, state: AgentState[Any], runtime: Runtime) -> dict[str, Any]:
             assert "omit_input" not in state
             assert "omit_output" in state
             assert "private_state" not in state
@@ -383,18 +386,18 @@ def test_runtime_injected_into_middleware() -> None:
     """Test that the runtime is injected into the middleware."""
 
     class CustomMiddleware(AgentMiddleware):
-        def before_model(self, state: AgentState, runtime: Runtime) -> None:
+        def before_model(self, state: AgentState[Any], runtime: Runtime) -> None:
             assert runtime is not None
 
         def wrap_model_call(
             self,
             request: ModelRequest,
-            handler: Callable[[ModelRequest], AIMessage],
-        ) -> AIMessage:
+            handler: Callable[[ModelRequest], ModelResponse],
+        ) -> ModelCallResult:
             assert request.runtime is not None
             return handler(request)
 
-        def after_model(self, state: AgentState, runtime: Runtime) -> None:
+        def after_model(self, state: AgentState[Any], runtime: Runtime) -> None:
             assert runtime is not None
 
     agent = create_agent(model=FakeToolCallingModel(), middleware=[CustomMiddleware()])
@@ -405,7 +408,7 @@ def after_model(self, state: AgentState, runtime: Runtime) -> None:
 
 
 # custom state w/in a function
-class CustomState(AgentState):
+class CustomState(AgentState[ResponseT], Generic[ResponseT]):
     custom_state: str
 
 
@@ -462,11 +465,11 @@ def test_injected_state_in_middleware_agent() -> None:
 
 def test_jump_to_is_ephemeral() -> None:
     class MyMiddleware(AgentMiddleware):
-        def before_model(self, state: AgentState) -> dict[str, Any]:
+        def before_model(self, state: AgentState[Any], runtime: Runtime) -> dict[str, Any]:
             assert "jump_to" not in state
             return {"jump_to": "model"}
 
-        def after_model(self, state: AgentState) -> dict[str, Any]:
+        def after_model(self, state: AgentState[Any], runtime: Runtime) -> dict[str, Any]:
             assert "jump_to" not in state
             return {"jump_to": "model"}
 
@@ -482,8 +485,8 @@ class AsyncOnlyMiddleware(AgentMiddleware):
         async def awrap_model_call(
             self,
             request: ModelRequest,
-            handler: Callable[[ModelRequest], Awaitable[AIMessage]],
-        ) -> AIMessage:
+            handler: Callable[[ModelRequest], Awaitable[ModelResponse]],
+        ) -> ModelCallResult:
             return await handler(request)
 
     agent = create_agent(
@@ -502,25 +505,25 @@ def test_create_agent_sync_invoke_with_mixed_middleware() -> None:
     calls = []
 
     class MixedMiddleware(AgentMiddleware):
-        def before_model(self, state, runtime) -> None:
+        def before_model(self, state: AgentState[Any], runtime: Runtime) -> None:
             calls.append("MixedMiddleware.before_model")
 
-        async def abefore_model(self, state, runtime) -> None:
+        async def abefore_model(self, state: AgentState[Any], runtime: Runtime) -> None:
             calls.append("MixedMiddleware.abefore_model")
 
         def wrap_model_call(
             self,
             request: ModelRequest,
-            handler: Callable[[ModelRequest], AIMessage],
-        ) -> AIMessage:
+            handler: Callable[[ModelRequest], ModelResponse],
+        ) -> ModelCallResult:
             calls.append("MixedMiddleware.wrap_model_call")
             return handler(request)
 
         async def awrap_model_call(
             self,
             request: ModelRequest,
-            handler: Callable[[ModelRequest], Awaitable[AIMessage]],
-        ) -> AIMessage:
+            handler: Callable[[ModelRequest], Awaitable[ModelResponse]],
+        ) -> ModelCallResult:
             calls.append("MixedMiddleware.awrap_model_call")
             return await handler(request)
 
@@ -550,19 +553,19 @@ async def test_create_agent_async_invoke() -> None:
     calls = []
 
     class AsyncMiddleware(AgentMiddleware):
-        async def abefore_model(self, state, runtime) -> None:
+        async def abefore_model(self, state: AgentState[Any], runtime: Runtime) -> None:
             calls.append("AsyncMiddleware.abefore_model")
 
         async def awrap_model_call(
             self,
             request: ModelRequest,
-            handler: Callable[[ModelRequest], Awaitable[AIMessage]],
-        ) -> AIMessage:
+            handler: Callable[[ModelRequest], Awaitable[ModelResponse]],
+        ) -> ModelCallResult:
             calls.append("AsyncMiddleware.awrap_model_call")
             request.messages.append(HumanMessage("async middleware message"))
             return await handler(request)
 
-        async def aafter_model(self, state, runtime) -> None:
+        async def aafter_model(self, state: AgentState[Any], runtime: Runtime) -> None:
             calls.append("AsyncMiddleware.aafter_model")
 
     @tool
@@ -611,33 +614,33 @@ async def test_create_agent_async_invoke_multiple_middleware() -> None:
     calls = []
 
     class AsyncMiddlewareOne(AgentMiddleware):
-        async def abefore_model(self, state, runtime) -> None:
+        async def abefore_model(self, state: AgentState[Any], runtime: Runtime) -> None:
             calls.append("AsyncMiddlewareOne.abefore_model")
 
         async def awrap_model_call(
             self,
             request: ModelRequest,
-            handler: Callable[[ModelRequest], Awaitable[AIMessage]],
-        ) -> AIMessage:
+            handler: Callable[[ModelRequest], Awaitable[ModelResponse]],
+        ) -> ModelCallResult:
             calls.append("AsyncMiddlewareOne.awrap_model_call")
             return await handler(request)
 
-        async def aafter_model(self, state, runtime) -> None:
+        async def aafter_model(self, state: AgentState[Any], runtime: Runtime) -> None:
             calls.append("AsyncMiddlewareOne.aafter_model")
 
     class AsyncMiddlewareTwo(AgentMiddleware):
-        async def abefore_model(self, state, runtime) -> None:
+        async def abefore_model(self, state: AgentState[Any], runtime: Runtime) -> None:
             calls.append("AsyncMiddlewareTwo.abefore_model")
 
         async def awrap_model_call(
             self,
             request: ModelRequest,
-            handler: Callable[[ModelRequest], Awaitable[AIMessage]],
-        ) -> AIMessage:
+            handler: Callable[[ModelRequest], Awaitable[ModelResponse]],
+        ) -> ModelCallResult:
             calls.append("AsyncMiddlewareTwo.awrap_model_call")
             return await handler(request)
 
-        async def aafter_model(self, state, runtime) -> None:
+        async def aafter_model(self, state: AgentState[Any], runtime: Runtime) -> None:
             calls.append("AsyncMiddlewareTwo.aafter_model")
 
     agent = create_agent(
@@ -664,12 +667,14 @@ async def test_create_agent_async_jump() -> None:
     calls = []
 
     class AsyncMiddlewareOne(AgentMiddleware):
-        async def abefore_model(self, state, runtime) -> None:
+        async def abefore_model(self, state: AgentState[Any], runtime: Runtime) -> None:
             calls.append("AsyncMiddlewareOne.abefore_model")
 
     class AsyncMiddlewareTwo(AgentMiddleware):
         @hook_config(can_jump_to=["end"])
-        async def abefore_model(self, state, runtime) -> dict[str, Any]:
+        async def abefore_model(
+            self, state: AgentState[Any], runtime: Runtime
+        ) -> dict[str, Any]:
             calls.append("AsyncMiddlewareTwo.abefore_model")
             return {"jump_to": "end"}
 
@@ -699,14 +704,14 @@ async def test_create_agent_mixed_sync_async_middleware_async_invoke() -> None:
     calls = []
 
     class MostlySyncMiddleware(AgentMiddleware):
-        def before_model(self, state, runtime) -> None:
+        def before_model(self, state: AgentState[Any], runtime: Runtime) -> None:
             calls.append("MostlySyncMiddleware.before_model")
 
         def wrap_model_call(
             self,
             request: ModelRequest,
-            handler: Callable[[ModelRequest], AIMessage],
-        ) -> AIMessage:
+            handler: Callable[[ModelRequest], ModelResponse],
+        ) -> ModelCallResult:
             calls.append("MostlySyncMiddleware.wrap_model_call")
             return handler(request)
 
@@ -718,22 +723,22 @@ async def awrap_model_call(
             calls.append("MostlySyncMiddleware.awrap_model_call")
             return await handler(request)
 
-        def after_model(self, state, runtime) -> None:
+        def after_model(self, state: AgentState[Any], runtime: Runtime) -> None:
             calls.append("MostlySyncMiddleware.after_model")
 
     class AsyncMiddleware(AgentMiddleware):
-        async def abefore_model(self, state, runtime) -> None:
+        async def abefore_model(self, state: AgentState[Any], runtime: Runtime) -> None:
             calls.append("AsyncMiddleware.abefore_model")
 
         async def awrap_model_call(
             self,
             request: ModelRequest,
-            handler: Callable[[ModelRequest], Awaitable[AIMessage]],
-        ) -> AIMessage:
+            handler: Callable[[ModelRequest], Awaitable[ModelResponse]],
+        ) -> ModelCallResult:
             calls.append("AsyncMiddleware.awrap_model_call")
             return await handler(request)
 
-        async def aafter_model(self, state, runtime) -> None:
+        async def aafter_model(self, state: AgentState[Any], runtime: Runtime) -> None:
             calls.append("AsyncMiddleware.aafter_model")
 
     agent = create_agent(
@@ -776,7 +781,9 @@ async def test_hook_execution(self, *, is_async: bool, hook_type: str) -> None:
             if hook_type == "before":
 
                 @before_agent
-                async def log_hook(state: AgentState, runtime) -> dict[str, Any] | None:
+                async def log_hook(
+                    state: AgentState[Any], *_args: Any, **_kwargs: Any
+                ) -> dict[str, Any] | None:
                     execution_log.append(f"{hook_type}_agent_called")
                     execution_log.append(f"message_count: {len(state['messages'])}")
                     return None
@@ -784,7 +791,9 @@ async def log_hook(state: AgentState, runtime) -> dict[str, Any] | None:
             else:
 
                 @after_agent
-                async def log_hook(state: AgentState, runtime) -> dict[str, Any] | None:
+                async def log_hook(
+                    state: AgentState[Any], *_args: Any, **_kwargs: Any
+                ) -> dict[str, Any] | None:
                     execution_log.append(f"{hook_type}_agent_called")
                     execution_log.append(f"message_count: {len(state['messages'])}")
                     return None
@@ -792,7 +801,9 @@ async def log_hook(state: AgentState, runtime) -> dict[str, Any] | None:
         elif hook_type == "before":
 
             @before_agent
-            def log_hook(state: AgentState, runtime) -> dict[str, Any] | None:
+            def log_hook(
+                state: AgentState[Any], *_args: Any, **_kwargs: Any
+            ) -> dict[str, Any] | None:
                 execution_log.append(f"{hook_type}_agent_called")
                 execution_log.append(f"message_count: {len(state['messages'])}")
                 return None
@@ -800,7 +811,9 @@ def log_hook(state: AgentState, runtime) -> dict[str, Any] | None:
         else:
 
             @after_agent
-            def log_hook(state: AgentState, runtime) -> dict[str, Any] | None:
+            def log_hook(
+                state: AgentState[Any], *_args: Any, **_kwargs: Any
+            ) -> dict[str, Any] | None:
                 execution_log.append(f"{hook_type}_agent_called")
                 execution_log.append(f"message_count: {len(state['messages'])}")
                 return None
@@ -822,33 +835,37 @@ async def test_hook_with_class_inheritance(self, *, is_async: bool, hook_type: s
         """Test agent hooks using class inheritance in both sync and async modes."""
         execution_log: list[str] = []
 
-        if is_async:
-
-            class CustomMiddleware(AgentMiddleware):
-                async def abefore_agent(self, state: AgentState, runtime) -> dict[str, Any] | None:
-                    if hook_type == "before":
-                        execution_log.append("hook_called")
-                    return None
-
-                async def aafter_agent(self, state: AgentState, runtime) -> dict[str, Any] | None:
-                    if hook_type == "after":
-                        execution_log.append("hook_called")
-                    return None
+        class AsyncCustomMiddleware(AgentMiddleware):
+            async def abefore_agent(
+                self, state: AgentState[Any], runtime: Runtime
+            ) -> dict[str, Any] | None:
+                if hook_type == "before":
+                    execution_log.append("hook_called")
+                return None
 
-        else:
+            async def aafter_agent(
+                self, state: AgentState[Any], runtime: Runtime
+            ) -> dict[str, Any] | None:
+                if hook_type == "after":
+                    execution_log.append("hook_called")
+                return None
 
-            class CustomMiddleware(AgentMiddleware):
-                def before_agent(self, state: AgentState, runtime) -> dict[str, Any] | None:
-                    if hook_type == "before":
-                        execution_log.append("hook_called")
-                    return None
+        class CustomMiddleware(AgentMiddleware):
+            def before_agent(
+                self, state: AgentState[Any], runtime: Runtime
+            ) -> dict[str, Any] | None:
+                if hook_type == "before":
+                    execution_log.append("hook_called")
+                return None
 
-                def after_agent(self, state: AgentState, runtime) -> dict[str, Any] | None:
-                    if hook_type == "after":
-                        execution_log.append("hook_called")
-                    return None
+            def after_agent(
+                self, state: AgentState[Any], runtime: Runtime
+            ) -> dict[str, Any] | None:
+                if hook_type == "after":
+                    execution_log.append("hook_called")
+                return None
 
-        middleware = CustomMiddleware()
+        middleware = AsyncCustomMiddleware() if is_async else CustomMiddleware()
 
         model = GenericFakeChatModel(messages=iter([AIMessage(content="Response")]))
         agent = create_agent(model=model, tools=[], middleware=[middleware])
@@ -871,21 +888,21 @@ async def test_execution_order(self, *, is_async: bool) -> None:
         if is_async:
 
             @before_agent
-            async def log_before(state: AgentState, runtime) -> None:
+            async def log_before(*_args: Any, **_kwargs: Any) -> None:
                 execution_log.append("before")
 
             @after_agent
-            async def log_after(state: AgentState, runtime) -> None:
+            async def log_after(*_args: Any, **_kwargs: Any) -> None:
                 execution_log.append("after")
 
         else:
 
             @before_agent
-            def log_before(state: AgentState, runtime) -> None:
+            def log_before(*_args: Any, **_kwargs: Any) -> None:
                 execution_log.append("before")
 
             @after_agent
-            def log_after(state: AgentState, runtime) -> None:
+            def log_after(*_args: Any, **_kwargs: Any) -> None:
                 execution_log.append("after")
 
         model = GenericFakeChatModel(messages=iter([AIMessage(content="Response")]))
@@ -902,7 +919,7 @@ def test_state_passthrough(self) -> None:
         """Test that state modifications in before_agent are visible to after_agent."""
 
         @before_agent
-        def modify_in_before(state: AgentState, runtime) -> dict[str, Any]:
+        def modify_in_before(*_args: Any, **_kwargs: Any) -> dict[str, Any]:
             return {"messages": [HumanMessage("Added by before_agent")]}
 
         model = GenericFakeChatModel(messages=iter([AIMessage(content="Response")]))
@@ -917,19 +934,19 @@ def test_multiple_middleware_instances(self) -> None:
         execution_log = []
 
         @before_agent
-        def before_one(state: AgentState, runtime) -> None:
+        def before_one(*_args: Any, **_kwargs: Any) -> None:
             execution_log.append("before_1")
 
         @before_agent
-        def before_two(state: AgentState, runtime) -> None:
+        def before_two(*_args: Any, **_kwargs: Any) -> None:
             execution_log.append("before_2")
 
         @after_agent
-        def after_one(state: AgentState, runtime) -> None:
+        def after_one(*_args: Any, **_kwargs: Any) -> None:
             execution_log.append("after_1")
 
         @after_agent
-        def after_two(state: AgentState, runtime) -> None:
+        def after_two(*_args: Any, **_kwargs: Any) -> None:
             execution_log.append("after_2")
 
         model = GenericFakeChatModel(messages=iter([AIMessage(content="Response")]))
@@ -956,19 +973,19 @@ def sample_tool_agent(query: str) -> str:
             return f"Result for: {query}"
 
         @before_agent
-        def log_before_agent(state: AgentState, runtime) -> None:
+        def log_before_agent(*_args: Any, **_kwargs: Any) -> None:
             execution_log.append("before_agent")
 
         @before_model
-        def log_before_model(state: AgentState, runtime) -> None:
+        def log_before_model(*_args: Any, **_kwargs: Any) -> None:
             execution_log.append("before_model")
 
         @after_agent
-        def log_after_agent(state: AgentState, runtime) -> None:
+        def log_after_agent(*_args: Any, **_kwargs: Any) -> None:
             execution_log.append("after_agent")
 
         @after_model
-        def log_after_model(state: AgentState, runtime) -> None:
+        def log_after_model(*_args: Any, **_kwargs: Any) -> None:
             execution_log.append("after_model")
 
         # Model will call a tool twice, then respond with final answer

From d2044c4405af79ece72ac09293970a64e1c12e26 Mon Sep 17 00:00:00 2001
From: Mason Daugherty
Date: Fri, 9 Jan 2026 17:19:36 -0500
Subject: [PATCH 2/2] ci

---
 .../unit_tests/agents/middleware/core/test_framework.py | 4 +---
 libs/langchain_v1/uv.lock                               | 6 +++---
 2 files changed, 4 insertions(+), 6 deletions(-)

diff --git a/libs/langchain_v1/tests/unit_tests/agents/middleware/core/test_framework.py b/libs/langchain_v1/tests/unit_tests/agents/middleware/core/test_framework.py
index a6d8654c8e987..76ed61f99db9b 100644
--- a/libs/langchain_v1/tests/unit_tests/agents/middleware/core/test_framework.py
+++ b/libs/langchain_v1/tests/unit_tests/agents/middleware/core/test_framework.py
@@ -672,9 +672,7 @@ async def abefore_model(self, state: AgentState[Any], runtime: Runtime) -> None:
 
     class AsyncMiddlewareTwo(AgentMiddleware):
         @hook_config(can_jump_to=["end"])
-        async def abefore_model(
-            self, state: AgentState[Any], runtime: Runtime
-        ) -> dict[str, Any]:
+        async def abefore_model(self, state: AgentState[Any], runtime: Runtime) -> dict[str, Any]:
             calls.append("AsyncMiddlewareTwo.abefore_model")
             return {"jump_to": "end"}
 
diff --git a/libs/langchain_v1/uv.lock b/libs/langchain_v1/uv.lock
index 1a4b3298d1e94..4bc2eeddf7c97 100644
--- a/libs/langchain_v1/uv.lock
+++ b/libs/langchain_v1/uv.lock
@@ -2102,7 +2102,7 @@ wheels = [
 
 [[package]]
 name = "langchain-core"
-version = "1.2.6"
+version = "1.2.7"
 source = { editable = "../core" }
 dependencies = [
     { name = "jsonpatch" },
@@ -2380,7 +2380,7 @@ requires-dist = [
 ]
 
 [package.metadata.requires-dev]
-lint = [{ name = "ruff", specifier = ">=0.14.10,<0.15.0" }]
+lint = [{ name = "ruff", specifier = ">=0.14.11,<0.15.0" }]
 test = [{ name = "langchain-core", editable = "../core" }]
 test-integration = []
 typing = [
@@ -2407,7 +2407,7 @@ dev = [
 ]
 lint = [
     { name = "langchain-core", editable = "../core" },
-    { name = "ruff", specifier = ">=0.14.10,<0.15.0" },
+    { name = "ruff", specifier = ">=0.14.11,<0.15.0" },
 ]
 test = [
     { name = "freezegun", specifier = ">=1.2.2,<2.0.0" },