bug: Semantic Kernel Adapter for Anthropic models with tool calls #5675

Open
ekzhu opened this issue Feb 23, 2025 · 1 comment

ekzhu commented Feb 23, 2025

```python
import asyncio
import os
from dotenv import load_dotenv

load_dotenv()
from autogen_agentchat.agents import AssistantAgent
from autogen_agentchat.ui import Console
from autogen_core.models import ModelFamily, UserMessage
from autogen_ext.models.semantic_kernel import SKChatCompletionAdapter
from semantic_kernel import Kernel
from semantic_kernel.connectors.ai.anthropic import AnthropicChatCompletion, AnthropicChatPromptExecutionSettings
from semantic_kernel.memory.null_memory import NullMemory


async def get_weather(city: str) -> str:
    """Get the weather for a city."""
    return f"The weather in {city} is 75 degrees."


async def main() -> None:
    sk_client = AnthropicChatCompletion(
        # ai_model_id="claude-3-5-sonnet-20241022",
        ai_model_id="claude-3-haiku-20240307",
        api_key=os.environ["ANTHROPIC_API_KEY"],
        service_id="my-service-id",  # Optional; for targeting specific services within Semantic Kernel
    )
    settings = AnthropicChatPromptExecutionSettings(
        temperature=0.2,
    )

    model_client = SKChatCompletionAdapter(
        sk_client,
        kernel=Kernel(memory=NullMemory()),
        prompt_settings=settings,
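        # model_info tells AutoGen which capabilities to assume for this model.
        # Note: the family below is CLAUDE_3_5_SONNET even though the service
        # above is configured for claude-3-haiku.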
        model_info={
            "function_calling": True,
            "json_output": True,
            "vision": True,
            "family": ModelFamily.CLAUDE_3_5_SONNET,
        },
    )

    # Call the model directly.
    response = await model_client.create([UserMessage(content="What is the capital of France?", source="test")])
    print(response)

    # Create an assistant agent with the model client.
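    # Note: reflect_on_tool_use=True makes the agent issue a second model call
    # to summarize the tool results; that reflection call is what fails below.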
    assistant = AssistantAgent(
        "assistant",
        model_client=model_client,
        system_message="You are a helpful assistant.",
        tools=[get_weather],
        reflect_on_tool_use=True,
        model_client_stream=False,
    )
    # Call the assistant with a task.
    await Console(assistant.run_stream(task="What is the weather in Paris and London?"))



if __name__ == "__main__":
    asyncio.run(main())
```

This is the example I am trying to run.

error trace:

```
finish_reason='stop' content='The capital of France is Paris.' usage=RequestUsage(prompt_tokens=0, completion_tokens=0) cached=False logprobs=None thought=None
---------- user ----------
What is the weather in Paris and London?
---------- assistant ----------
[FunctionCall(id='toolu_017CvSHZsqnf8repjmYZZysM', arguments='{"city": "Paris"}', name='get_weather')]
---------- assistant ----------
[FunctionExecutionResult(content='The weather in Paris is 75 degrees.', call_id='toolu_017CvSHZsqnf8repjmYZZysM', is_error=False)]
Unsupported item type in Tool message while formatting chat history for Anthropic: <class 'semantic_kernel.contents.text_content.TextContent'>
Traceback (most recent call last):
  File ".venv-3.13/lib/python3.13/site-packages/semantic_kernel/connectors/ai/anthropic/services/anthropic_chat_completion.py", line 328, in _send_chat_request
    response = await self.async_client.messages.create(**settings.prepare_settings_dict())
               ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File ".venv-3.13/lib/python3.13/site-packages/anthropic/resources/messages/messages.py", line 2054, in create
    return await self._post(
           ^^^^^^^^^^^^^^^^^
    ...<24 lines>...
    )
    ^
  File ".venv-3.13/lib/python3.13/site-packages/anthropic/_base_client.py", line 1855, in post
    return await self.request(cast_to, opts, stream=stream, stream_cls=stream_cls)
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File ".venv-3.13/lib/python3.13/site-packages/anthropic/_base_client.py", line 1549, in request
    return await self._request(
           ^^^^^^^^^^^^^^^^^^^^
    ...<5 lines>...
    )
    ^
  File ".venv-3.13/lib/python3.13/site-packages/anthropic/_base_client.py", line 1650, in _request
    raise self._make_status_error_from_response(err.response) from None
anthropic.BadRequestError: Error code: 400 - {'type': 'error', 'error': {'type': 'invalid_request_error', 'message': 'messages.2: all messages must have non-empty content except for the optional final assistant message'}}

The above exception was the direct cause of the following exception:

Traceback (most recent call last):
  File "/Applications/PyCharm CE.app/Contents/plugins/python-ce/helpers/pydev/pydevd.py", line 1570, in _exec
    pydev_imports.execfile(file, globals, locals)  # execute the script
    ~~~~~~~~~~~~~~~~~~~~~~^^^^^^^^^^^^^^^^^^^^^^^
  File "/Applications/PyCharm CE.app/Contents/plugins/python-ce/helpers/pydev/_pydev_imps/_pydev_execfile.py", line 18, in execfile
    exec(compile(contents+"\n", file, 'exec'), glob, loc)
    ~~~~^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "src/anthropicTest.py", line 59, in <module>
    asyncio.run(main())
    ~~~~~~~~~~~^^^^^^^^
  File "/Library/Frameworks/Python.framework/Versions/3.13/lib/python3.13/asyncio/runners.py", line 195, in run
    return runner.run(main)
           ~~~~~~~~~~^^^^^^
  File "/Library/Frameworks/Python.framework/Versions/3.13/lib/python3.13/asyncio/runners.py", line 118, in run
    return self._loop.run_until_complete(task)
           ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~^^^^^^
  File "/Library/Frameworks/Python.framework/Versions/3.13/lib/python3.13/asyncio/base_events.py", line 725, in run_until_complete
    return future.result()
           ~~~~~~~~~~~~~^^
  File "src/anthropicTest.py", line 54, in main
    await Console(assistant.run_stream(task="What is the weather in Paris and London?"))
  File ".venv-3.13/lib/python3.13/site-packages/autogen_agentchat/ui/_console.py", line 117, in Console
    async for message in stream:
    ...<74 lines>...
                    total_usage.prompt_tokens += message.models_usage.prompt_tokens
  File ".venv-3.13/lib/python3.13/site-packages/autogen_agentchat/agents/_base_chat_agent.py", line 176, in run_stream
    async for message in self.on_messages_stream(input_messages, cancellation_token):
    ...<9 lines>...
            output_messages.append(message)
  File ".venv-3.13/lib/python3.13/site-packages/autogen_agentchat/agents/_assistant_agent.py", line 512, in on_messages_stream
    reflection_model_result = await self._model_client.create(
                              ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
        llm_messages, cancellation_token=cancellation_token
        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
    )
    ^
  File ".venv-3.13/lib/python3.13/site-packages/autogen_ext/models/semantic_kernel/_sk_chat_completion_adapter.py", line 424, in create
    result = await self._sk_client.get_chat_message_contents(chat_history, settings=settings, kernel=kernel)
             ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File ".venv-3.13/lib/python3.13/site-packages/semantic_kernel/connectors/ai/chat_completion_client_base.py", line 134, in get_chat_message_contents
    return await self._inner_get_chat_message_contents(chat_history, settings)
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File ".venv-3.13/lib/python3.13/site-packages/semantic_kernel/utils/telemetry/model_diagnostics/decorators.py", line 112, in wrapper_decorator
    return await completion_func(*args, **kwargs)
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File ".venv-3.13/lib/python3.13/site-packages/semantic_kernel/connectors/ai/anthropic/services/anthropic_chat_completion.py", line 167, in _inner_get_chat_message_contents
    return await self._send_chat_request(settings)
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File ".venv-3.13/lib/python3.13/site-packages/semantic_kernel/connectors/ai/anthropic/services/anthropic_chat_completion.py", line 330, in _send_chat_request
    raise ServiceResponseException(
    ...<2 lines>...
    ) from ex
semantic_kernel.exceptions.service_exceptions.ServiceResponseException: ("<class 'semantic_kernel.connectors.ai.anthropic.services.anthropic_chat_completion.AnthropicChatCompletion'> service failed to complete the request", BadRequestError("Error code: 400 - {'type': 'error', 'error': {'type': 'invalid_request_error', 'message': 'messages.2: all messages must have non-empty content except for the optional final assistant message'}}"))
python-BaseException
Exception ignored in: <function _DeleteDummyThreadOnDel.__del__ at 0x104df16c0>
Traceback (most recent call last):
  File "/Applications/PyCharm CE.app/Contents/plugins/python-ce/helpers/pydev/_pydevd_bundle/pydevd_pep_669_tracing.py", line 635, in py_raise_callback
  File "/Library/Frameworks/Python.framework/Versions/3.13/lib/python3.13/threading.py", line 1435, in current_thread
TypeError: 'NoneType' object is not subscriptable

Process finished with exit code 1
``` 

Originally posted by @hariharan1st in #2164
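
For context, the 400 means that one of the messages in the converted chat history reaches Anthropic with no content blocks. The "Unsupported item type in Tool message" warning above suggests the connector drops the `TextContent` items carrying the tool results, which would leave that message empty. Below is a minimal, hypothetical reconstruction of the rejected request shape; the IDs and structure are illustrative only, since the actual serialization happens inside the Semantic Kernel Anthropic connector.

```python
# Hypothetical reconstruction of the request shape Anthropic rejects;
# not the connector's actual output.
messages = [
    {"role": "user", "content": "What is the weather in Paris and London?"},
    {
        "role": "assistant",
        "content": [
            {
                "type": "tool_use",
                "id": "toolu_017CvSHZsqnf8repjmYZZysM",
                "name": "get_weather",
                "input": {"city": "Paris"},
            }
        ],
    },
    # messages[2]: the tool-result turn whose TextContent items were dropped,
    # leaving empty content -> "all messages must have non-empty content
    # except for the optional final assistant message".
    {"role": "user", "content": []},
]
```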


ekzhu commented Feb 23, 2025

@lspinheiro this may be fixed by including the `thought` field in `AssistantMessage`, introduced in #5500. That fix was only applied to the OpenAI client; we may need to add the same handling to the Semantic Kernel model adapter.
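
If it helps, here is a rough sketch of that direction, assuming a hypothetical conversion helper (this is not the actual `SKChatCompletionAdapter` code): when an `AssistantMessage` carries tool calls, emit its `thought` text as a `TextContent` item so the converted assistant message never has empty content.

```python
# Hypothetical sketch; the function name and placement are illustrative,
# not the real SKChatCompletionAdapter internals.
from autogen_core.models import AssistantMessage
from semantic_kernel.contents import ChatMessageContent, FunctionCallContent, TextContent
from semantic_kernel.contents.utils.author_role import AuthorRole


def assistant_message_to_sk(message: AssistantMessage) -> ChatMessageContent:
    items: list = []
    # Mirror the #5500 fix for the OpenAI client: keep the model's text
    # ("thought") alongside the tool calls so the assistant message that
    # precedes the tool results is never empty.
    if message.thought:
        items.append(TextContent(text=message.thought))
    if isinstance(message.content, list):  # list[FunctionCall]
        for call in message.content:
            items.append(
                FunctionCallContent(id=call.id, name=call.name, arguments=call.arguments)
            )
    else:  # plain string content
        items.append(TextContent(text=message.content))
    return ChatMessageContent(role=AuthorRole.ASSISTANT, items=items)
```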
