diff --git a/src/strands/experimental/__init__.py b/src/strands/experimental/__init__.py index 188c80c69..e330f1086 100644 --- a/src/strands/experimental/__init__.py +++ b/src/strands/experimental/__init__.py @@ -1,6 +1,13 @@ """Experimental features. This module implements experimental features that are subject to change in future revisions without notice. + +Available submodules: +- conversation_manager: Experimental conversation management strategies +- tools: Experimental tool providers + +Note: Import experimental features directly from their submodules to avoid circular dependencies. +Example: from strands.experimental.conversation_manager import MappingConversationManager """ from . import tools diff --git a/src/strands/experimental/conversation_manager/__init__.py b/src/strands/experimental/conversation_manager/__init__.py new file mode 100644 index 000000000..e8c83a2d4 --- /dev/null +++ b/src/strands/experimental/conversation_manager/__init__.py @@ -0,0 +1,16 @@ +"""Experimental conversation management strategies. + +This module implements experimental conversation managers that are subject to change. +""" + +from .mapping_conversation_manager import ( + LargeToolResultMapper, + MappingConversationManager, + MessageMapper, +) + +__all__ = [ + "MappingConversationManager", + "MessageMapper", + "LargeToolResultMapper", +] diff --git a/src/strands/experimental/conversation_manager/mapping_conversation_manager.py b/src/strands/experimental/conversation_manager/mapping_conversation_manager.py new file mode 100644 index 000000000..ae13bcce8 --- /dev/null +++ b/src/strands/experimental/conversation_manager/mapping_conversation_manager.py @@ -0,0 +1,457 @@ +"""Conversation manager that applies message mapping/transformation functions. + +This module provides a simple, flexible approach to conversation management through +composable message mappers. Instead of complex strategy hierarchies, users provide +callable functions or classes that map messages to transformed versions or None. +""" + +import copy +import logging +from typing import TYPE_CHECKING, Any, Dict, List, Optional + +from typing_extensions import Protocol, override + +from ...agent.conversation_manager.conversation_manager import ConversationManager +from ...types.content import Message, Messages +from ...types.tools import ToolResult, ToolResultContent + +if TYPE_CHECKING: + from ...agent.agent import Agent + +logger = logging.getLogger(__name__) + + +class MessageMapper(Protocol): + """Protocol for message mapping/transformation functions. + + A MessageMapper is any callable that takes a message and its context, + and returns either a transformed message or None (to remove it). + + This protocol enables both simple lambda functions and complex class-based + mappers to be used interchangeably. + + Important: Mappers should be stateless with respect to conversation history. + While configuration parameters (like thresholds or templates) are acceptable, + mappers should not accumulate state across invocations since mapper-specific + state is not persisted by the conversation manager. 
+ + Example: + # Simple lambda mapper + remove_old = lambda msg, idx, msgs: None if idx < 5 else msg + + # Class-based mapper with configuration (stateless) + class CustomMapper: + def __init__(self, threshold: int): + self.threshold = threshold # Configuration only, not accumulated state + + def __call__(self, message, index, messages): + # transformation logic using self.threshold + return transformed_message + """ + + def __call__(self, message: Message, index: int, messages: Messages) -> Optional[Message]: + """Transform a message. + + Args: + message: The message to transform + index: The index of this message in the conversation + messages: The full conversation history (read-only) + + Returns: + Transformed message, or None to remove the message entirely + """ + ... + + +class LargeToolResultMapper: + """Maps messages by compressing large tool results while preserving structure. + + This mapper identifies tool results that exceed a token threshold and compresses + them using simple heuristics (text truncation, JSON summarization) while + maintaining essential information about tool execution status. + + This mapper is stateless - it only uses configuration parameters and does not + accumulate state across invocations. + + Example: + mapper = LargeToolResultMapper( + max_tokens=50_000, + truncate_at=500, + compression_template="[Compressed: {original_size} -> {compressed_size} tokens]" + ) + + manager = MappingConversationManager( + mapper=mapper, + preserve_first=1, + preserve_last=2 + ) + """ + + def __init__( + self, + max_tokens: int = 50_000, + truncate_at: int = 500, + compression_template: str = ( + "[Tool result compressed: {original_size} tokens -> {compressed_size} tokens. Original status: {status}]" + ), + ): + """Initialize the large tool result mapper. + + Args: + max_tokens: Maximum tokens allowed in tool results before compression + truncate_at: Character length threshold for truncating text content + compression_template: Template string for compression messages. + Available placeholders: {original_size}, {compressed_size}, {status} + """ + self.max_tokens = max_tokens + self.truncate_at = truncate_at + self.compression_template = compression_template + + def __call__(self, message: Message, index: int, messages: Messages) -> Optional[Message]: + """Transform message by compressing large tool results. 
+ + Args: + message: The message to potentially compress + index: Index of this message in the conversation + messages: Full conversation history + + Returns: + Message with compressed tool results, or original if no compression needed + """ + # Check if message has tool results that need compression + has_large_result = False + for content in message.get("content", []): + if "toolResult" in content: + result_size = self._estimate_tool_result_tokens(content["toolResult"]) + if result_size > self.max_tokens: + has_large_result = True + break + + if not has_large_result: + return message + + # Create a deep copy and compress tool results + compressed_message = copy.deepcopy(message) + + for content in compressed_message.get("content", []): + if "toolResult" in content: + tool_result = content["toolResult"] + original_size = self._estimate_tool_result_tokens(tool_result) + + if original_size > self.max_tokens: + compressed_result = self._compress_tool_result(tool_result) + content["toolResult"] = compressed_result + + compressed_size = self._estimate_tool_result_tokens(compressed_result) + logger.info( + "Compressed tool result at index %d: %d -> %d tokens", + index, + original_size, + compressed_size, + ) + + return compressed_message + + def _estimate_tool_result_tokens(self, tool_result: ToolResult) -> int: + """Estimate token count for a tool result. + + Uses a simple heuristic: ~4 characters per token on average. + + Args: + tool_result: The tool result to estimate + + Returns: + Estimated token count + """ + total_tokens = 0 + + for content_item in tool_result.get("content", []): + if "text" in content_item: + char_count = len(content_item["text"]) + total_tokens += int(char_count / 4) + elif "json" in content_item: + json_str = str(content_item["json"]) + char_count = len(json_str) + total_tokens += int(char_count / 4) + elif "document" in content_item: + total_tokens += len(content_item["document"]["source"]["bytes"]) + elif "image" in content_item: + total_tokens += len(content_item["image"]["source"]["bytes"]) + + return total_tokens + + def _compress_tool_result(self, tool_result: ToolResult) -> ToolResult: + """Apply compression to tool result. + + Compression strategies: + - Text: Truncate long text content + - JSON: Summarize large JSON objects/arrays + - Other: Keep as-is + + Args: + tool_result: The tool result to compress + + Returns: + Compressed tool result + """ + original_size = self._estimate_tool_result_tokens(tool_result) + compressed_content: List[ToolResultContent] = [] + + for content_item in tool_result.get("content", []): + if "text" in content_item: + text = content_item["text"] + if len(text) > self.truncate_at: + compressed_text = text[: self.truncate_at] + f"... 
[truncated from {len(text)} chars]" + compressed_content.append({"text": compressed_text}) + else: + compressed_content.append(content_item) + + elif "json" in content_item: + json_data = content_item["json"] + json_str = str(json_data) + + if len(json_str) > 500: + if isinstance(json_data, dict): + compressed_json = { + "_compressed": True, + "_type": "dict", + "_original_keys": len(json_data.keys()), + "_size": len(json_str), + } + # Include small values as samples + for idx, (key, value) in enumerate(json_data.items()): + if idx >= 3: # Limit to first 3 items + break + value_str = str(value) + if len(value_str) < 100: + compressed_json[key] = value + + compressed_content.append({"json": compressed_json}) + + elif isinstance(json_data, list): + sample_list: List[Any] = [] + # Include small items as samples + for idx, item in enumerate(json_data): + if idx >= 3: # Limit to first 3 items + break + if len(str(item)) < 100: + sample_list.append(item) + + compressed_json = { + "_compressed": True, + "_type": "list", + "_length": len(json_data), + "_size": len(json_str), + "_sample": sample_list, + } + + compressed_content.append({"json": compressed_json}) + else: + compressed_content.append(content_item) + else: + compressed_content.append(content_item) + + else: + # Keep other content types (documents, images) as-is + compressed_content.append(content_item) + + # Calculate compressed size for reporting + compressed_size = self._estimate_tool_result_tokens( + ToolResult( + content=compressed_content, + status=tool_result["status"], + toolUseId=tool_result["toolUseId"], + ) + ) + + # Prepend compression note + compression_note = self.compression_template.format( + original_size=original_size, + compressed_size=compressed_size, + status=tool_result["status"], + ) + + final_content: List[ToolResultContent] = [{"text": compression_note}, *compressed_content] + + return ToolResult( + content=final_content, + status=tool_result["status"], + toolUseId=tool_result["toolUseId"], + ) + + +class MappingConversationManager(ConversationManager): + """Conversation manager that applies message mapping functions. + + This manager provides a simple, composable approach to conversation management. + Instead of inheritance hierarchies, users provide callable mapper functions that + transform or remove messages. Mappers are applied to messages in the "prunable" + range (excluding preserved initial and recent messages). + + Example: + # Using built-in mapper + manager = MappingConversationManager( + mapper=LargeToolResultMapper(max_tokens=100_000), + preserve_first=1, + preserve_last=2 + ) + + # Using lambda for simple cases + manager = MappingConversationManager( + mapper=lambda msg, idx, msgs: None if should_remove(msg) else msg + ) + + # Using custom mapper class + manager = MappingConversationManager( + mapper=CustomMapper(config_value=42) + ) + """ + + def __init__( + self, + mapper: MessageMapper, + preserve_first: int = 1, + preserve_last: int = 2, + ): + """Initialize the mapping conversation manager. + + Args: + mapper: Message mapper function to apply. Mappers should be stateless with + respect to conversation history. While configuration parameters (like + thresholds) are acceptable, mappers should not accumulate state across + invocations since mapper-specific state is not persisted by the + conversation manager. 
+ preserve_first: Number of initial messages to never map/remove + preserve_last: Number of recent messages to never map/remove + """ + super().__init__() + self.mapper = mapper + self.preserve_first = preserve_first + self.preserve_last = preserve_last + + @override + def apply_management(self, agent: "Agent", **kwargs: Any) -> None: + """Apply message mapping if there are prunable messages. + + Args: + agent: The agent whose conversation will be managed + **kwargs: Additional keyword arguments for extensibility + """ + original_count = len(agent.messages) + if not self._can_apply_mappers(agent): + logger.debug( + "Too few messages to map safely: %d messages, %d preserved", + original_count, + self.preserve_first + self.preserve_last, + ) + return + + self.reduce_context(agent, **kwargs) + + @override + def reduce_context( + self, + agent: "Agent", + e: Optional[Exception] = None, + **kwargs: Any, # noqa: ARG002 + ) -> None: + """Reduce context by applying message mappers. + + Applies all configured mappers to messages in the prunable range + (excluding preserved initial and recent messages). + + Args: + agent: The agent whose conversation will be reduced + e: The exception that triggered reduction, if any + **kwargs: Additional keyword arguments for extensibility + """ + original_count = len(agent.messages) + if not self._can_apply_mappers(agent): + logger.warning( + "Too few messages to map safely: %d messages, %d preserved", + original_count, + self.preserve_first + self.preserve_last, + ) + return + + mapped_messages, removed_count = self._apply_mapper(agent.messages) + agent.messages[:] = mapped_messages + self.removed_message_count += removed_count + + logger.info( + "Mapping completed: %d -> %d messages (%d removed)", + original_count, + len(mapped_messages), + removed_count, + ) + + def _can_apply_mappers(self, agent: "Agent") -> bool: + """Check if there are enough messages to safely apply mappers. + + Args: + agent: The agent to check + + Returns: + True if there are messages in the prunable range + """ + original_count = len(agent.messages) + total_preserved = self.preserve_first + self.preserve_last + return original_count > total_preserved + + def _apply_mapper(self, messages: Messages) -> tuple[Messages, int]: + """Apply mapper to prunable messages. + + Args: + messages: The messages to map + + Returns: + Tuple of (mapped messages, count of removed messages) + """ + min_mappable_index = self.preserve_first + max_mappable_index = len(messages) - self.preserve_last + + mapped_messages = [] + removed_count = 0 + + for i, message in enumerate(messages): + # Preserve initial messages + if i < min_mappable_index: + mapped_messages.append(message) + continue + + # Preserve recent messages + if i >= max_mappable_index: + mapped_messages.append(message) + continue + + # Apply mapper to prunable messages + current_message = self.mapper(message, i, messages) + + if current_message is not None: + mapped_messages.append(current_message) + else: + removed_count += 1 + + return mapped_messages, removed_count + + @override + def get_state(self) -> Dict[str, Any]: + """Get current state for session persistence. + + Returns: + Dictionary containing manager state + """ + return super().get_state() + + @override + def restore_from_session(self, state: Dict[str, Any]) -> Optional[List[Message]]: + """Restore manager state from session. 
+ + Args: + state: State dictionary to restore from + + Returns: + None (no messages to prepend) + """ + super().restore_from_session(state) + return None diff --git a/tests/strands/experimental/conversation_manager/__init__.py b/tests/strands/experimental/conversation_manager/__init__.py new file mode 100644 index 000000000..91086c498 --- /dev/null +++ b/tests/strands/experimental/conversation_manager/__init__.py @@ -0,0 +1 @@ +"""Tests for experimental conversation managers.""" diff --git a/tests/strands/experimental/conversation_manager/test_mapping_conversation_manager.py b/tests/strands/experimental/conversation_manager/test_mapping_conversation_manager.py new file mode 100644 index 000000000..0e06bcf72 --- /dev/null +++ b/tests/strands/experimental/conversation_manager/test_mapping_conversation_manager.py @@ -0,0 +1,394 @@ +"""Tests for MappingConversationManager and message mappers.""" + +from typing import Optional +from unittest.mock import Mock + +import pytest + +from strands.experimental.conversation_manager.mapping_conversation_manager import ( + LargeToolResultMapper, + MappingConversationManager, +) +from strands.types.content import Message, Messages + + +def simple_remove_old_mapper(message: Message, index: int, messages: Messages) -> Optional[Message]: + """Test mapper that removes messages containing 'Old message'.""" + for content in message.get("content", []): + if "text" in content and "Old message" in content["text"]: + return None + return message + + +class TestMappingConversationManager: + """Test the MappingConversationManager class.""" + + @pytest.fixture + def large_result_mapper(self): + return LargeToolResultMapper(max_tokens=100) + + @pytest.fixture + def manager(self, large_result_mapper): + return MappingConversationManager( + mapper=large_result_mapper, + preserve_first=1, + preserve_last=2, + ) + + @pytest.fixture + def simple_manager(self): + """Manager with simple test mapper.""" + return MappingConversationManager( + mapper=simple_remove_old_mapper, + preserve_first=1, + preserve_last=2, + ) + + @pytest.fixture + def mock_agent(self): + agent = Mock() + agent.messages = [] + return agent + + def test_initialization(self, large_result_mapper): + """Test manager initialization with parameters.""" + manager = MappingConversationManager( + mapper=large_result_mapper, + preserve_first=2, + preserve_last=5, + ) + + assert manager.mapper == large_result_mapper + assert manager.preserve_first == 2 + assert manager.preserve_last == 5 + assert manager.removed_message_count == 0 + + def test_reduce_context_with_empty_messages(self, manager, mock_agent): + """Test that reduce_context returns early with empty messages.""" + mock_agent.messages = [] + + # Should return early without raising exception + manager.reduce_context(mock_agent) + assert len(mock_agent.messages) == 0 + assert manager.removed_message_count == 0 + + def test_reduce_context_with_insufficient_messages(self, manager, mock_agent): + """Test behavior with too few messages to map safely.""" + mock_agent.messages = [{"role": "user", "content": [{"text": "Message 1"}]}] + + # Should return early without raising exception + manager.reduce_context(mock_agent) + assert len(mock_agent.messages) == 1 + assert manager.removed_message_count == 0 + + def test_successful_mapping_removes_messages(self, simple_manager, mock_agent): + """Test successful message removal via mapping.""" + mock_agent.messages = [ + {"role": "user", "content": [{"text": "Initial"}]}, # Preserved + {"role": "user", "content": 
[{"text": "Old message 1"}]}, # Removed + {"role": "user", "content": [{"text": "Old message 2"}]}, # Removed + {"role": "user", "content": [{"text": "Recent 1"}]}, # Preserved + {"role": "user", "content": [{"text": "Recent 2"}]}, # Preserved + ] + + original_count = len(mock_agent.messages) + simple_manager.reduce_context(mock_agent) + + # Should have removed the "Old message" entries + assert len(mock_agent.messages) < original_count + assert simple_manager.removed_message_count == 2 + + # Check preserved messages remain + assert mock_agent.messages[0]["content"][0]["text"] == "Initial" + assert "Recent" in mock_agent.messages[-1]["content"][0]["text"] + + def test_preserve_first_messages(self, simple_manager, mock_agent): + """Test that first messages are never mapped.""" + mock_agent.messages = [ + {"role": "user", "content": [{"text": "Old message initial"}]}, # Should be preserved despite "Old" + {"role": "user", "content": [{"text": "Old message 2"}]}, # Should be removed + {"role": "user", "content": [{"text": "Recent 1"}]}, + {"role": "user", "content": [{"text": "Recent 2"}]}, + ] + + simple_manager.reduce_context(mock_agent) + + # First message should still be there even though it contains "Old message" + assert mock_agent.messages[0]["content"][0]["text"] == "Old message initial" + + def test_preserve_last_messages(self, simple_manager, mock_agent): + """Test that last messages are never mapped.""" + mock_agent.messages = [ + {"role": "user", "content": [{"text": "Initial"}]}, + {"role": "user", "content": [{"text": "Old message"}]}, # Should be removed + {"role": "user", "content": [{"text": "Old message recent 1"}]}, # Should be preserved + {"role": "user", "content": [{"text": "Old message recent 2"}]}, # Should be preserved + ] + + simple_manager.reduce_context(mock_agent) + + # Last two messages should be preserved + assert len(mock_agent.messages) >= 2 + assert "Old message recent" in mock_agent.messages[-1]["content"][0]["text"] + assert "Old message recent" in mock_agent.messages[-2]["content"][0]["text"] + + def test_lambda_mapper(self, mock_agent): + """Test using lambda functions as mappers.""" + + # Lambda that removes messages with "remove" in text + def remove_mapper(msg, idx, msgs): + return None if any("remove" in c.get("text", "") for c in msg.get("content", [])) else msg + + manager = MappingConversationManager( + mapper=remove_mapper, + preserve_first=1, + preserve_last=1, + ) + + mock_agent.messages = [ + {"role": "user", "content": [{"text": "Keep 1"}]}, + {"role": "user", "content": [{"text": "remove this"}]}, + {"role": "user", "content": [{"text": "Keep 2"}]}, + ] + + manager.reduce_context(mock_agent) + assert len(mock_agent.messages) == 2 + assert mock_agent.messages[0]["content"][0]["text"] == "Keep 1" + assert mock_agent.messages[1]["content"][0]["text"] == "Keep 2" + + def test_proactive_mapping_with_no_prunable_messages(self, manager, mock_agent): + """Test that proactive mapping does not trigger with no prunable messages.""" + # Only 3 messages total, with preserve_first=1 and preserve_last=2 + mock_agent.messages = [ + {"role": "user", "content": [{"text": "Initial"}]}, + {"role": "user", "content": [{"text": "Recent 1"}]}, + {"role": "user", "content": [{"text": "Recent 2"}]}, + ] + + # Should not trigger mapping (no prunable messages) + manager.apply_management(mock_agent) + assert len(mock_agent.messages) == 3 # No change + + def test_proactive_mapping_with_prunable_messages(self, simple_manager, mock_agent): + """Test that proactive mapping 
triggers when there are prunable messages.""" + mock_agent.messages = [ + {"role": "user", "content": [{"text": "Initial"}]}, + {"role": "user", "content": [{"text": "Old message"}]}, + {"role": "user", "content": [{"text": "Recent 1"}]}, + {"role": "user", "content": [{"text": "Recent 2"}]}, + ] + + # Should trigger mapping and remove "Old message" + simple_manager.apply_management(mock_agent) + assert len(mock_agent.messages) == 3 + assert simple_manager.removed_message_count == 1 + + def test_state_persistence(self, manager): + """Test getting and restoring state.""" + manager.removed_message_count = 5 + state = manager.get_state() + + assert state["removed_message_count"] == 5 + assert state["__name__"] == "MappingConversationManager" + + # Create a no-op mapper for testing + def noop_mapper(msg, idx, msgs): + return msg + + new_manager = MappingConversationManager(mapper=noop_mapper) + new_manager.restore_from_session(state) + assert new_manager.removed_message_count == 5 + + def test_mapping_with_no_removals(self, simple_manager, mock_agent): + """Test mapping even when no messages are removed.""" + # Create messages where nothing will be removed + mock_agent.messages = [ + {"role": "user", "content": [{"text": "Keep 1"}]}, + {"role": "user", "content": [{"text": "Keep 2"}]}, + {"role": "user", "content": [{"text": "Keep 3"}]}, + {"role": "user", "content": [{"text": "Keep 4"}]}, + ] + + # Should complete successfully even if nothing is removed + simple_manager.reduce_context(mock_agent) + assert len(mock_agent.messages) == 4 + assert simple_manager.removed_message_count == 0 + + +class TestLargeToolResultMapper: + """Test the LargeToolResultMapper implementation.""" + + @pytest.fixture + def mapper(self): + return LargeToolResultMapper(max_tokens=100, truncate_at=50) + + @pytest.fixture + def mock_agent(self): + return Mock() + + def test_initialization(self): + """Test mapper initialization.""" + mapper = LargeToolResultMapper(max_tokens=50000, truncate_at=500, compression_template="Custom template") + + assert mapper.max_tokens == 50000 + assert mapper.truncate_at == 500 + assert mapper.compression_template == "Custom template" + + def test_does_not_map_small_tool_results(self, mapper): + """Test that small tool results are not modified.""" + message: Message = { + "role": "user", + "content": [ + { + "toolResult": { + "toolUseId": "123", + "content": [{"text": "Small result"}], + "status": "success", + } + } + ], + } + + result = mapper(message, 0, [message]) + assert result == message # Should be unchanged + + def test_compresses_large_tool_results(self, mapper): + """Test that large tool results are compressed.""" + large_text = "A" * 1000 # Large content + message: Message = { + "role": "user", + "content": [ + { + "toolResult": { + "toolUseId": "123", + "content": [{"text": large_text}], + "status": "success", + } + } + ], + } + + result = mapper(message, 0, [message]) + + assert result is not None + assert result != message # Should be modified + + # Check compression occurred + tool_result = result["content"][0]["toolResult"] + assert len(tool_result["content"]) >= 1 + assert "compressed" in tool_result["content"][0]["text"].lower() + + def test_does_not_map_non_tool_messages(self, mapper): + """Test that regular messages are not affected.""" + message: Message = {"role": "user", "content": [{"text": "Regular message"}]} + + result = mapper(message, 0, [message]) + assert result == message + + def test_estimate_tokens_for_text(self, mapper): + """Test token estimation for 
text content.""" + tool_result = { + "toolUseId": "123", + "content": [{"text": "test " * 100}], # 400 chars -> ~100 tokens + "status": "success", + } + + tokens = mapper._estimate_tool_result_tokens(tool_result) + assert tokens > 50 + assert isinstance(tokens, int) + + def test_estimate_tokens_for_json(self, mapper): + """Test token estimation for JSON content.""" + tool_result = { + "toolUseId": "123", + "content": [{"json": {"key": "value", "number": 42}}], + "status": "success", + } + + tokens = mapper._estimate_tool_result_tokens(tool_result) + assert tokens > 0 + assert isinstance(tokens, int) + + def test_compress_truncates_long_text(self, mapper): + """Test that long text is truncated.""" + tool_result = { + "toolUseId": "123", + "content": [{"text": "A" * 500}], + "status": "success", + } + + compressed = mapper._compress_tool_result(tool_result) + + assert compressed["toolUseId"] == "123" + assert compressed["status"] == "success" + + # Should have compression note + truncated content + assert len(compressed["content"]) >= 2 + assert "compressed" in compressed["content"][0]["text"].lower() + assert "truncated" in compressed["content"][1]["text"].lower() + + def test_compress_summarizes_large_json_dict(self, mapper): + """Test that large JSON dicts are summarized.""" + large_json = {f"key_{i}": f"value_{i}" for i in range(100)} + tool_result = { + "toolUseId": "123", + "content": [{"json": large_json}], + "status": "success", + } + + compressed = mapper._compress_tool_result(tool_result) + + # Should have compression summary + json_content = compressed["content"][1]["json"] + assert json_content.get("_compressed") is True + assert json_content.get("_type") == "dict" + assert json_content.get("_original_keys") == 100 + + def test_compress_summarizes_large_json_list(self, mapper): + """Test that large JSON lists are summarized.""" + large_list = [f"item_{i}" for i in range(100)] + tool_result = { + "toolUseId": "123", + "content": [{"json": large_list}], + "status": "success", + } + + compressed = mapper._compress_tool_result(tool_result) + + # Should have compression summary + json_content = compressed["content"][1]["json"] + assert json_content.get("_compressed") is True + assert json_content.get("_type") == "list" + assert json_content.get("_length") == 100 + + def test_preserves_small_json(self, mapper): + """Test that small JSON is not compressed.""" + small_json = {"key": "value"} + tool_result = { + "toolUseId": "123", + "content": [{"json": small_json}], + "status": "success", + } + + compressed = mapper._compress_tool_result(tool_result) + + # Should preserve small JSON + json_content = compressed["content"][1]["json"] + assert json_content == small_json + + def test_multiple_content_items(self, mapper): + """Test compression with multiple content items.""" + tool_result = { + "toolUseId": "123", + "content": [ + {"text": "A" * 100}, + {"json": {"key": "value"}}, + {"text": "B" * 100}, + ], + "status": "success", + } + + compressed = mapper._compress_tool_result(tool_result) + + # Should have compression note + all content items + assert len(compressed["content"]) >= 4 # note + 3 items