diff --git a/README.md b/README.md index c34bd253..1288a6d5 100644 --- a/README.md +++ b/README.md @@ -100,6 +100,7 @@ You can find some examples of how to use these adapters in the `adapters_example - [LangGraph](https://github.com/oracle/agent-spec/tree/main/adapters_examples/langgraph) - [AutoGen](https://github.com/oracle/agent-spec/tree/main/adapters_examples/autogen) +- [CrewAI](https://github.com/oracle/agent-spec/tree/main/adapters_examples/crewai) ## Positioning in the Agentic Ecosystem diff --git a/adapters_examples/crewai/agentspec_to_crewai.py b/adapters_examples/crewai/agentspec_to_crewai.py new file mode 100644 index 00000000..9fef522f --- /dev/null +++ b/adapters_examples/crewai/agentspec_to_crewai.py @@ -0,0 +1,85 @@ +# Copyright © 2025 Oracle and/or its affiliates. +# +# This software is under the Apache License 2.0 +# (LICENSE-APACHE or http://www.apache.org/licenses/LICENSE-2.0) or Universal Permissive License +# (UPL) 1.0 (LICENSE-UPL or https://oss.oracle.com/licenses/upl), at your option. 
+ +# mypy: ignore-errors + +from pyagentspec.agent import Agent +from pyagentspec.llms import LlmGenerationConfig, VllmConfig +from pyagentspec.property import FloatProperty +from pyagentspec.tools import ClientTool, ServerTool + +tools = [ + ClientTool( + name="sum", + description="Sum two numbers", + inputs=[FloatProperty(title="a"), FloatProperty(title="b")], + outputs=[FloatProperty(title="result")], + ), + ClientTool( + name="subtract", + description="Subtract two numbers", + inputs=[FloatProperty(title="a"), FloatProperty(title="b")], + outputs=[FloatProperty(title="result")], + ), + ServerTool( + name="multiply", + description="Multiply two numbers", + inputs=[FloatProperty(title="a"), FloatProperty(title="b")], + outputs=[FloatProperty(title="result")], + ), + ServerTool( + name="divide", + description="Divide two numbers", + inputs=[FloatProperty(title="a"), FloatProperty(title="b")], + outputs=[FloatProperty(title="result")], + ), +] + +agent = Agent( + name="calculator_agent", + description="An agent that provides assistance with tool use.", + llm_config=VllmConfig( + name="llama-maverick", + model_id="Llama-4-Maverick", + url="url.to.my.llama.model", + default_generation_parameters=LlmGenerationConfig(temperature=0.1), + ), + system_prompt=( + "You are a helpful calculator agent.\n" + "Your duty is to compute the result of the given operation using tools, " + "and to output the result.\n" + "It's important that you reply with the result only.\n" + ), + tools=tools, +) + + +from pyagentspec.adapters.crewai import AgentSpecLoader + +importer = AgentSpecLoader( + tool_registry={ + "divide": lambda a, b: a / b, + "multiply": lambda a, b: a * b, + } +) +calculator_agent = importer.load_component(agent) + +from crewai import Crew, Task + +task = Task( + description="{user_input}", + expected_output="A helpful, concise reply to the user.", + agent=calculator_agent, +) +crew = Crew(agents=[calculator_agent], tasks=[task]) + +print("=== Running Crew AI 
Calculator Agent ===") +while True: + user_input = input("USER >>> ") + if user_input.lower() in ["exit", "quit"]: + break + response = crew.kickoff(inputs={"user_input": user_input}) + print("AGENT >>>", response) diff --git a/adapters_examples/crewai/crewai_to_agentspec.py b/adapters_examples/crewai/crewai_to_agentspec.py new file mode 100644 index 00000000..29ad1a9c --- /dev/null +++ b/adapters_examples/crewai/crewai_to_agentspec.py @@ -0,0 +1,104 @@ +# Copyright © 2025 Oracle and/or its affiliates. +# +# This software is under the Apache License 2.0 +# (LICENSE-APACHE or http://www.apache.org/licenses/LICENSE-2.0) or Universal Permissive License +# (UPL) 1.0 (LICENSE-UPL or https://oss.oracle.com/licenses/upl), at your option. + +# mypy: ignore-errors + +from crewai import LLM, Agent +from crewai.tools.base_tool import Tool +from pydantic import BaseModel + + +class InputSchema(BaseModel): + a: float + b: float + + +def sum_(a: float, b: float) -> float: + """Sum two numbers""" + return a + b + + +def subtract(a: float, b: float) -> float: + """Subtract two numbers""" + return a - b + + +def multiply(a: float, b: float) -> float: + """Multiply two numbers""" + return a * b + + +def divide(a: float, b: float) -> float: + """Divide two numbers""" + return a / b + + +llm = LLM( + model="hosted_vllm/Llama-4-Maverick", + api_base="http://url.to.my.llama.model/v1", + max_tokens=512, +) + +calculator_agent = Agent( + role="Calculator agent", + goal="Computes the mathematical operation prompted by the user", + backstory="You are a calculator with 20 years of experience", + llm=llm, + tools=[ + Tool( + name="sum", + description="Sum two numbers", + args_schema=InputSchema, + func=sum_, + ), + Tool( + name="subtract", + description="Subtract two numbers", + args_schema=InputSchema, + func=subtract, + ), + Tool( + name="divide", + description="Divide two numbers", + args_schema=InputSchema, + func=divide, + ), + Tool( + name="multiply", + description="Multiply two 
numbers", + args_schema=InputSchema, + func=multiply, + ), + ], +) + + +if __name__ == "__main__": + + from crewai import Crew, Task + + task = Task( + description="{history}", + expected_output="A helpful, concise reply to the user.", + agent=calculator_agent, + ) + crew = Crew(agents=[calculator_agent], tasks=[task]) + + history = [] + while True: + user_input = input("USER >>> ") + if user_input.lower() in ["exit", "quit"]: + break + history.append(f"User: {user_input}") + response = crew.kickoff(inputs={"history": history}) + history.append(f"Agent: {response}") + print("AGENT >>>", response) + + from pyagentspec.adapters.crewai import AgentSpecExporter + + exporter = AgentSpecExporter() + agentspec_yaml = exporter.to_yaml(calculator_agent) + print(agentspec_yaml) diff --git a/docs/pyagentspec/source/_components/all_components.json b/docs/pyagentspec/source/_components/all_components.json index 883e5d18..c73e36f8 100644 --- a/docs/pyagentspec/source/_components/all_components.json +++ b/docs/pyagentspec/source/_components/all_components.json @@ -282,6 +282,8 @@ "classes": [ {"path": "pyagentspec.adapters.autogen.AgentSpecExporter"}, {"path": "pyagentspec.adapters.autogen.AgentSpecLoader"}, + {"path": "pyagentspec.adapters.crewai.AgentSpecExporter"}, + {"path": "pyagentspec.adapters.crewai.AgentSpecLoader"}, {"path": "pyagentspec.adapters.langgraph.AgentSpecExporter"}, {"path": "pyagentspec.adapters.langgraph.AgentSpecLoader"} ] diff --git a/docs/pyagentspec/source/_static/icons/crewai-adapter.jpg b/docs/pyagentspec/source/_static/icons/crewai-adapter.jpg new file mode 100644 index 00000000..937a2bbd Binary files /dev/null and b/docs/pyagentspec/source/_static/icons/crewai-adapter.jpg differ diff --git a/docs/pyagentspec/source/_static/icons/crewai-icon.png b/docs/pyagentspec/source/_static/icons/crewai-icon.png new file mode 100644 index 00000000..bc44909d Binary files /dev/null and b/docs/pyagentspec/source/_static/icons/crewai-icon.png differ diff --git 
a/docs/pyagentspec/source/adapters/crewai.rst b/docs/pyagentspec/source/adapters/crewai.rst new file mode 100644 index 00000000..bc5937c6 --- /dev/null +++ b/docs/pyagentspec/source/adapters/crewai.rst @@ -0,0 +1,60 @@ +.. _crewaiadapter: + +============================ +Agent Spec Adapters - CrewAI +============================ + + +.. figure:: ../_static/icons/crewai-adapter.jpg + :align: center + :scale: 18% + :alt: Agent Spec adapter for CrewAI + + ↑ With the **Agent Spec adapter for CrewAI**, you can easily import agents from external frameworks using Agent Spec and run them with CrewAI. + +*CrewAI enables the design of collaborative AI agents and workflows, incorporating guardrails, memory, +and observability for production-ready multi-agent systems.* + + +Get started +=========== + +To get started, set up your Python environment (Python 3.10 to 3.13 required), +and then install the PyAgentSpec package with the CrewAI extension. + + +.. code-block:: bash + + python -m venv .venv + source .venv/bin/activate # On Windows: .venv\Scripts\activate + pip install "pyagentspec[crewai]" + + +You are now ready to use the adapter: + +- Run Agent Spec configurations with CrewAI (see more details :ref:`below `) +- Convert CrewAI agents to Agent Spec (see more details :ref:`below `) + + + +.. _spectocrewai: + +Run Agent Spec configurations with CrewAI +========================================= + + +.. literalinclude:: ../code_examples/adapter_crewai_quickstart.py + :language: python + :start-after: .. start-agentspec_to_runtime + :end-before: .. end-agentspec_to_runtime + + +.. _crewaitospec: + +Convert CrewAI agents to Agent Spec +=================================== + +.. literalinclude:: ../code_examples/adapter_crewai_quickstart.py + :language: python + :start-after: .. start-runtime_to_agentspec + :end-before: .. 
end-runtime_to_agentspec diff --git a/docs/pyagentspec/source/api/adapters.rst b/docs/pyagentspec/source/api/adapters.rst index c807ccf8..373c6241 100644 --- a/docs/pyagentspec/source/api/adapters.rst +++ b/docs/pyagentspec/source/api/adapters.rst @@ -16,6 +16,15 @@ LangGraph .. _adapters_langgraph_loader: .. autoclass:: pyagentspec.adapters.langgraph.AgentSpecLoader +CrewAI +------ + +.. _adapters_crewai_exporter: +.. autoclass:: pyagentspec.adapters.crewai.AgentSpecExporter + +.. _adapters_crewai_loader: +.. autoclass:: pyagentspec.adapters.crewai.AgentSpecLoader + AutoGen ------- diff --git a/docs/pyagentspec/source/changelog.rst b/docs/pyagentspec/source/changelog.rst index 57ee6cd6..67e8210d 100644 --- a/docs/pyagentspec/source/changelog.rst +++ b/docs/pyagentspec/source/changelog.rst @@ -7,6 +7,14 @@ Agent Spec |release| New features ^^^^^^^^^^^^ +* **Added CrewAI adapter to pyagentspec:** + + The CrewAI adapter is now available as part of ``pyagentspec``. + You can access its functionality through the ``pyagentspec.adapters.crewai`` subpackage. + It requires the ``crewai`` extra dependency to be installed. + + For more information read the :doc:`API Reference `. + * **MCP tools support in LangGraph adapter:** The LangGraph adapter now supports Model Context Protocol (MCP) tools. diff --git a/docs/pyagentspec/source/code_examples/adapter_crewai_quickstart.py b/docs/pyagentspec/source/code_examples/adapter_crewai_quickstart.py new file mode 100644 index 00000000..e1ccce27 --- /dev/null +++ b/docs/pyagentspec/source/code_examples/adapter_crewai_quickstart.py @@ -0,0 +1,128 @@ +# Copyright © 2025 Oracle and/or its affiliates. +# +# This software is under the Apache License 2.0 +# (LICENSE-APACHE or http://www.apache.org/licenses/LICENSE-2.0) or Universal Permissive License +# (UPL) 1.0 (LICENSE-UPL or https://oss.oracle.com/licenses/upl), at your option. 
+ +# isort:skip_file +# fmt: off +# mypy: ignore-errors + +try: + import crewai # noqa: F401 +except ImportError: + exit() # Not installed +except RuntimeError as e: + if "Your system has an unsupported version of sqlite3" in str(e): + # ChromaDB requires a version of SQLite which is not always supported + __import__("pysqlite3") + import sys + sys.modules["sqlite3"] = sys.modules.pop("pysqlite3") + else: + raise e # other error + +# .. start-agentspec_to_runtime +# Create a Agent Spec agent +from pyagentspec.agent import Agent +from pyagentspec.llms.openaicompatibleconfig import OpenAiCompatibleConfig +from pyagentspec.property import FloatProperty +from pyagentspec.tools import ServerTool + +subtraction_tool = ServerTool( + name="subtraction-tool", + description="subtract two numbers together", + inputs=[FloatProperty(title="a"), FloatProperty(title="b")], + outputs=[FloatProperty(title="difference")], +) + +agentspec_llm_config = OpenAiCompatibleConfig( + name="llama-3.3-70b-instruct", + model_id="/storage/models/Llama-3.3-70B-Instruct", + url="url.to.my.llm", +) + +agentspec_agent = Agent( + name="agentspec_tools_test", + description="agentspec_tools_test", + llm_config=agentspec_llm_config, + system_prompt="Perform subtraction with the given tool.", + tools=[subtraction_tool], +) + +# Export the Agent Spec configuration +from pyagentspec.serialization import AgentSpecSerializer + +agentspec_config = AgentSpecSerializer().to_json(agentspec_agent) + +# Load and run the Agent Spec configuration with CrewAI +import os +os.environ["CREWAI_DISABLE_TELEMETRY"] = "true" +from crewai import Crew, Task +from pyagentspec.adapters.crewai import AgentSpecLoader + +def subtract(a: float, b: float) -> float: + return a - b + +async def main(): + loader = AgentSpecLoader(tool_registry={"subtraction-tool": subtract}) + assistant = loader.load_json(agentspec_config) + + while True: + task = Task( + description="{user_input}", + expected_output="A helpful, concise reply to the 
user.", + agent=assistant, + async_execution=True + ) + crew = Crew(agents=[assistant], tasks=[task]) + user_input = input("USER >> ") + if user_input == "exit": + break + response = await crew.kickoff_async(inputs={"user_input": user_input}) + print(f"AGENT >> {response}") + + +# anyio.run(main) +# USER >> Compute 987654321-123456789 +# AGENT >> 864197532 +# .. end-agentspec_to_runtime +# .. start-runtime_to_agentspec +# Create a CrewAI Agent +from crewai import LLM, Agent +from crewai.tools.base_tool import Tool +from pydantic import BaseModel + +class InputSchema(BaseModel): + a: float + b: float + +def subtract(a: float, b: float) -> float: + """Subtract two numbers""" + return a - b + +llm = LLM( + model="hosted_vllm/Llama-4-Maverick", + api_base="http://url.to.my.llama.model/v1", + max_tokens=512, +) + +crewai_agent = Agent( + role="Calculator agent", + goal="Computes the mathematical operation prompted by the user", + backstory="You are a calculator with 20 years of experience", + llm=llm, + tools=[ + Tool( + name="subtract", + description="Subtract two numbers", + args_schema=InputSchema, + func=subtract, + ), + ], +) + +# Convert to Agent Spec +from pyagentspec.adapters.crewai import AgentSpecExporter + +agentspec_config = AgentSpecExporter().to_json(crewai_agent) +# .. end-runtime_to_agentspec diff --git a/docs/pyagentspec/source/code_examples/wayflow_cross_framework_agent.py b/docs/pyagentspec/source/code_examples/wayflow_cross_framework_agent.py index 09972c5b..f095ff8d 100644 --- a/docs/pyagentspec/source/code_examples/wayflow_cross_framework_agent.py +++ b/docs/pyagentspec/source/code_examples/wayflow_cross_framework_agent.py @@ -137,6 +137,13 @@ def convert_agentspec_to_wayflow(agentspec_component: Component): loader = LangGraphLoader(tool_registry=tool_registry) agent = loader.load_component(deserialized_agentspec_agent) # .. end-using-langgraph-agentspec-adapter: +# .. 
using-crewai-agentspec-adapter: +# Load the Agent Spec component into a CrewAI assistant +from pyagentspec.adapters.crewai import AgentSpecLoader as CrewAILoader + +loader = CrewAILoader(tool_registry=tool_registry) +agent = loader.load_component(deserialized_agentspec_agent) +# .. end-using-crewai-agentspec-adapter: # .. using-autogen-agentspec-adapter: # Load the Agent Spec component into a AutoGen assistant from pyagentspec.adapters.autogen import AgentSpecLoader as AutoGenLoader diff --git a/docs/pyagentspec/source/docs_home.rst b/docs/pyagentspec/source/docs_home.rst index 5ac651e9..15f263cd 100644 --- a/docs/pyagentspec/source/docs_home.rst +++ b/docs/pyagentspec/source/docs_home.rst @@ -120,6 +120,7 @@ Agent Spec is developed jointly between Oracle Cloud Infrastructure and Oracle L LangGraph WayFlow + CrewAI AutoGen diff --git a/docs/pyagentspec/source/ecosystem/integrations.rst b/docs/pyagentspec/source/ecosystem/integrations.rst index 80715c1e..4168113f 100644 --- a/docs/pyagentspec/source/ecosystem/integrations.rst +++ b/docs/pyagentspec/source/ecosystem/integrations.rst @@ -15,7 +15,7 @@ Agent Spec Integrations :width: 250px -Open Agent Specification provides support for **LangGraph**, and **WayFlow**, as well as integrations +Open Agent Specification provides support for **LangGraph**, **CrewAI**, **WayFlow**, as well as integrations for a growing collection of AI Agents frameworks. Learn more in our guide on :doc:`running Agent Spec configurations across frameworks <../howtoguides/howto_execute_agentspec_across_frameworks>` @@ -49,6 +49,16 @@ for a growing collection of AI Agents frameworks. Learn more in our guide on .. grid:: 2 + .. grid-item-card:: |crewai-icon| + :link: https://docs.crewai.com/ + :link-alt: CrewAI Docs + + **CrewAI** + + Enables the design of collaborative AI agents and workflows, incorporating guardrails, memory, and observability for production-ready multi-agent systems. + + :bdg-success:`available` + .. 
grid-item-card:: |autogen-icon| :link: https://microsoft.github.io/autogen/ :link-alt: AutoGen Docs diff --git a/docs/pyagentspec/source/howtoguides/howto_execute_agentspec_across_frameworks.rst b/docs/pyagentspec/source/howtoguides/howto_execute_agentspec_across_frameworks.rst index 90ba9d39..ac985b45 100644 --- a/docs/pyagentspec/source/howtoguides/howto_execute_agentspec_across_frameworks.rst +++ b/docs/pyagentspec/source/howtoguides/howto_execute_agentspec_across_frameworks.rst @@ -43,6 +43,7 @@ Additionally, we provide the adapter implementation for some of the most common - `LangGraph `_ - `AutoGen `_ +- `CrewAI `_ .. seealso:: @@ -198,8 +199,19 @@ to the framework you want to use Agent Spec with. :start-after: .. using-langgraph-agentspec-adapter: :end-before: .. end-using-langgraph-agentspec-adapter: - .. tab:: AutoGen + .. tab:: CrewAI + + .. code-block:: bash + + # To use this adapter, please install pyagentspec with the "crewai" extension. + pip install "pyagentspec[crewai]" + .. literalinclude:: ../code_examples/wayflow_cross_framework_agent.py + :language: python + :start-after: .. using-crewai-agentspec-adapter: + :end-before: .. end-using-crewai-agentspec-adapter: + + .. tab:: AutoGen .. code-block:: bash diff --git a/docs/pyagentspec/source/index.rst b/docs/pyagentspec/source/index.rst index f1538b78..a2275b4e 100644 --- a/docs/pyagentspec/source/index.rst +++ b/docs/pyagentspec/source/index.rst @@ -183,7 +183,7 @@ Open Agent Specification, Agent Spec Agent Spec configurations can be executed with Agent Spec-compatible runtimes, such as `WayFlow `_, or - with other agentic frameworks, like AutoGen, and LangGraph, through adapters. + with other agentic frameworks, like AutoGen, CrewAI, and LangGraph, through adapters. 
ㅤ➔ :doc:`Run your Agent Spec Configuration Across Frameworks ` diff --git a/docs/pyagentspec/source/installation.rst b/docs/pyagentspec/source/installation.rst index 3ced39e3..095df24f 100644 --- a/docs/pyagentspec/source/installation.rst +++ b/docs/pyagentspec/source/installation.rst @@ -70,6 +70,7 @@ Extra dependencies |project| offers optional extra dependencies that can be installed to enable additional features. * The ``autogen`` extra dependency gives access to the AutoGen runtime adapter. +* The ``crewai`` extra dependency gives access to the CrewAI runtime adapter. * The ``langgraph`` extra dependency gives access to the LangGraph runtime adapter. To install extra dependencies, run the following command specifying the list of dependencies you want to install: diff --git a/pyagentspec/constraints/constraints.txt b/pyagentspec/constraints/constraints.txt index 1bfe5783..e9dff39c 100644 --- a/pyagentspec/constraints/constraints.txt +++ b/pyagentspec/constraints/constraints.txt @@ -9,6 +9,9 @@ autogen-core==0.7.4 autogen-ext==0.7.4 autogen-agentchat==0.7.4 +# CrewAI adapter +crewai==1.6.1 + # LangGraph adapter langgraph==0.6.11 langchain-core==0.3.75 diff --git a/pyagentspec/requirements-dev.txt b/pyagentspec/requirements-dev.txt index 5b8b2618..9c572a9e 100644 --- a/pyagentspec/requirements-dev.txt +++ b/pyagentspec/requirements-dev.txt @@ -33,7 +33,7 @@ sphinx_design==0.6.1 -c constraints/constraints.txt # Frameworks do not support all python versions up to 3.14 yet --e .[autogen,langgraph_mcp] +-e .[autogen,crewai,langgraph_mcp] # or -e . 
diff --git a/pyagentspec/setup.cfg b/pyagentspec/setup.cfg index f7fd4b98..4640d011 100644 --- a/pyagentspec/setup.cfg +++ b/pyagentspec/setup.cfg @@ -9,6 +9,8 @@ filterwarnings = # documentation: https://docs.pytest.org/en/7.1.x/how-to/capture-warnings.html#warns error ignore::ResourceWarning + # Crew AI warnings + ignore:Cannot set an attribute on 'crewai.rag' for child module:ImportWarning ignore:importing 'Const' from 'astroid' is deprecated:DeprecationWarning ignore:Support for class-based `config` is deprecated:DeprecationWarning ignore:The `__fields__` attribute is deprecated, use the `model_fields` class property instead:DeprecationWarning diff --git a/pyagentspec/setup.py b/pyagentspec/setup.py index d354e141..86df70ca 100644 --- a/pyagentspec/setup.py +++ b/pyagentspec/setup.py @@ -79,6 +79,10 @@ def read(file_name): "autogen-ext[ollama,openai]>=0.5.6; python_version < '3.13'", "autogen-agentchat>=0.5.6; python_version < '3.13'", ], + "crewai": [ + "crewai[litellm]>=1.6.1; python_version < '3.14'", + "litellm>=1.79.0; python_version < '3.14'", + ], "langgraph": LANGGRAPH_DEPS, "langgraph_mcp": LANGGRAPH_DEPS + ["langchain-mcp-adapters>=0.1.13,<0.2.0"], }, diff --git a/pyagentspec/src/pyagentspec/adapters/crewai/__init__.py b/pyagentspec/src/pyagentspec/adapters/crewai/__init__.py new file mode 100644 index 00000000..63d7cfd4 --- /dev/null +++ b/pyagentspec/src/pyagentspec/adapters/crewai/__init__.py @@ -0,0 +1,15 @@ +# Copyright © 2025 Oracle and/or its affiliates. +# +# This software is under the Apache License 2.0 +# (LICENSE-APACHE or http://www.apache.org/licenses/LICENSE-2.0) or Universal Permissive License +# (UPL) 1.0 (LICENSE-UPL or https://oss.oracle.com/licenses/upl), at your option. 
+ +"""Agent Spec adapter for the CrewAI agentic framework.""" + +from .agentspecexporter import AgentSpecExporter +from .agentspecloader import AgentSpecLoader + +__all__ = [ + "AgentSpecExporter", + "AgentSpecLoader", +] diff --git a/pyagentspec/src/pyagentspec/adapters/crewai/_agentspecconverter.py b/pyagentspec/src/pyagentspec/adapters/crewai/_agentspecconverter.py new file mode 100644 index 00000000..31b3d6f7 --- /dev/null +++ b/pyagentspec/src/pyagentspec/adapters/crewai/_agentspecconverter.py @@ -0,0 +1,219 @@ +# Copyright © 2025 Oracle and/or its affiliates. +# +# This software is under the Apache License 2.0 +# (LICENSE-APACHE or http://www.apache.org/licenses/LICENSE-2.0) or Universal Permissive License +# (UPL) 1.0 (LICENSE-UPL or https://oss.oracle.com/licenses/upl), at your option. + + +import uuid +from typing import ( + Any, + Callable, + Dict, + List, + Optional, + Type, + Union, + cast, + get_args, + get_origin, + get_type_hints, +) + +from pydantic import BaseModel + +from pyagentspec.adapters.crewai._types import ( + CrewAIAgent, + CrewAIBaseTool, + CrewAILlm, + CrewAIStructuredTool, + CrewAITool, +) +from pyagentspec.agent import Agent as AgentSpecAgent +from pyagentspec.component import Component as AgentSpecComponent +from pyagentspec.llms import LlmConfig as AgentSpecLlmConfig +from pyagentspec.llms import LlmGenerationConfig as AgentSpecLlmGenerationConfig +from pyagentspec.llms.ollamaconfig import OllamaConfig as AgentSpecOllamaModel +from pyagentspec.llms.openaicompatibleconfig import ( + OpenAiCompatibleConfig as AgentSpecOpenAiCompatibleConfig, +) +from pyagentspec.llms.openaiconfig import OpenAiConfig as AgentSpecOpenAiConfig +from pyagentspec.llms.vllmconfig import VllmConfig as AgentSpecVllmModel +from pyagentspec.property import Property as AgentSpecProperty +from pyagentspec.tools import ServerTool as AgentSpecServerTool +from pyagentspec.tools import Tool as AgentSpecTool + + +def generate_id() -> str: + return str(uuid.uuid4()) + + 
+def _get_obj_reference(obj: Any) -> str: + return f"{obj.__class__.__name__.lower()}/{id(obj)}" + + +def _pydantic_model_to_properties_list(model: Type[BaseModel]) -> List[AgentSpecProperty]: + json_schema = model.model_json_schema() + for property_name, property_json_schema in json_schema["properties"].items(): + property_json_schema["title"] = property_name + return [ + AgentSpecProperty(json_schema=property_json_schema) + for property_name, property_json_schema in json_schema["properties"].items() + ] + + +def _python_type_to_jsonschema(py_type: Any) -> Dict[str, Any]: + origin = get_origin(py_type) + args = get_args(py_type) + if py_type is int: + return {"type": "integer"} + elif py_type is float: + return {"type": "number"} + elif py_type is str: + return {"type": "string"} + elif py_type is bool: + return {"type": "boolean"} + elif py_type is None: + return {"type": "null"} + elif origin is list or origin is List: + return {"type": "array", "items": _python_type_to_jsonschema(args[0])} + elif origin is dict or origin is Dict: + return {"type": "object"} + elif origin is Union: + return {"anyOf": [_python_type_to_jsonschema(a) for a in args if a is not type(None)]} + else: + return {} + + +def _get_return_type_json_schema_from_function_reference( + func: Callable[..., Any], +) -> Dict[str, Any]: + hints = get_type_hints(func) + return _python_type_to_jsonschema(hints.get("return", str)) + + +class CrewAIToAgentSpecConverter: + + def convert( + self, + crewai_component: Any, + referenced_objects: Optional[Dict[str, AgentSpecComponent]] = None, + ) -> AgentSpecComponent: + """Convert the given CrewAI component object into the corresponding PyAgentSpec component""" + + if referenced_objects is None: + referenced_objects = dict() + + # Reuse the same object multiple times in order to exploit the referencing system + object_reference = _get_obj_reference(crewai_component) + if object_reference in referenced_objects: + return referenced_objects[object_reference] + 
+ # If we did not find the object, we create it, and we record it in the referenced_objects registry + agentspec_component: AgentSpecComponent + if isinstance(crewai_component, CrewAILlm): + agentspec_component = self._llm_convert_to_agentspec( + crewai_component, referenced_objects + ) + elif isinstance(crewai_component, CrewAIAgent): + agentspec_component = self._agent_convert_to_agentspec( + crewai_component, referenced_objects + ) + elif isinstance(crewai_component, CrewAIBaseTool): + agentspec_component = self._tool_convert_to_agentspec( + crewai_component, referenced_objects + ) + else: + raise NotImplementedError( + f"The crewai type '{crewai_component.__class__.__name__}' is not yet supported " + f"for conversion. Please contact the AgentSpec team." + ) + referenced_objects[object_reference] = agentspec_component + return referenced_objects[object_reference] + + def _llm_convert_to_agentspec( + self, crewai_llm: CrewAILlm, referenced_objects: Dict[str, Any] + ) -> AgentSpecLlmConfig: + model_provider, model_id = crewai_llm.model.split("/", 1) + max_tokens = int(crewai_llm.max_tokens) if crewai_llm.max_tokens is not None else None + default_generation_parameters = AgentSpecLlmGenerationConfig( + temperature=crewai_llm.temperature, + top_p=crewai_llm.top_p, + max_tokens=max_tokens, + ) + if model_provider == "ollama": + if crewai_llm.base_url is None: + raise ValueError("Ollama LLM configuration requires a non-null base_url") + return AgentSpecOllamaModel( + name=crewai_llm.model, + model_id=model_id, + url=crewai_llm.base_url, + default_generation_parameters=default_generation_parameters, + ) + elif model_provider == "hosted_vllm": + if crewai_llm.api_base is None: + raise ValueError("VLLM LLM configuration requires a non-null api_base") + return AgentSpecVllmModel( + name=crewai_llm.model, + model_id=model_id, + url=crewai_llm.api_base.replace("/v1", ""), + default_generation_parameters=default_generation_parameters, + ) + elif model_provider == "openai": + 
if crewai_llm.api_base is not None: + return AgentSpecOpenAiCompatibleConfig( + name=crewai_llm.model, + model_id=model_id, + url=crewai_llm.api_base.replace("/v1", ""), + default_generation_parameters=default_generation_parameters, + ) + return AgentSpecOpenAiConfig( + name=crewai_llm.model, + model_id=model_id, + default_generation_parameters=default_generation_parameters, + ) + + raise ValueError(f"Unsupported type of LLM in Agent Spec: {model_provider}") + + def _tool_convert_to_agentspec( + self, crewai_tool: CrewAIBaseTool, referenced_objects: Dict[str, Any] + ) -> AgentSpecTool: + # We do our best to infer the output type + if isinstance(crewai_tool, (CrewAIStructuredTool, CrewAITool)): + # StructuredTool has the `func` attribute that contains the function + output_json_schema = _get_return_type_json_schema_from_function_reference( + crewai_tool.func + ) + else: + # Otherwise the CrewAI Tools are supposed to implement the `_run` method + output_json_schema = _get_return_type_json_schema_from_function_reference( + crewai_tool._run + ) + # There seem to be no counterparts for client tools and remote tools in CrewAI at the moment + return AgentSpecServerTool( + name=crewai_tool.name, + description=crewai_tool.description, + inputs=_pydantic_model_to_properties_list(crewai_tool.args_schema), + outputs=[AgentSpecProperty(title="result", json_schema=output_json_schema)], + ) + + def _agent_convert_to_agentspec( + self, crewai_agent: CrewAIAgent, referenced_objects: Dict[str, Any] + ) -> AgentSpecAgent: + return AgentSpecAgent( + id=str(crewai_agent.id), + name=crewai_agent.role, + description=crewai_agent.backstory, + system_prompt=crewai_agent.goal, + llm_config=cast( + AgentSpecLlmConfig, + self.convert( + crewai_agent.llm, + referenced_objects=referenced_objects, + ), + ), + tools=[ + cast(AgentSpecTool, self.convert(tool, referenced_objects=referenced_objects)) + for tool in (crewai_agent.tools or []) + ], + ) diff --git 
# Copyright © 2025 Oracle and/or its affiliates.
#
# This software is under the Apache License 2.0
# (LICENSE-APACHE or http://www.apache.org/licenses/LICENSE-2.0) or Universal Permissive License
# (UPL) 1.0 (LICENSE-UPL or https://oss.oracle.com/licenses/upl), at your option.

from typing import Any, Dict, List, Optional

from pydantic import BaseModel, Field, create_model

from pyagentspec.adapters._tools_common import _create_remote_tool_func
from pyagentspec.adapters.crewai._types import (
    CrewAIAgent,
    CrewAIBaseTool,
    CrewAILlm,
    CrewAIServerToolType,
    CrewAITool,
)
from pyagentspec.adapters.crewai.tracing import CrewAIAgentWithTracing
from pyagentspec.agent import Agent as AgentSpecAgent
from pyagentspec.component import Component as AgentSpecComponent
from pyagentspec.llms import LlmConfig as AgentSpecLlmConfig
from pyagentspec.llms.ollamaconfig import OllamaConfig as AgentSpecOllamaModel
from pyagentspec.llms.openaicompatibleconfig import (
    OpenAiCompatibleConfig as AgentSpecOpenAiCompatibleConfig,
)
from pyagentspec.llms.openaiconfig import OpenAiConfig as AgentSpecOpenAiConfig
from pyagentspec.llms.vllmconfig import VllmConfig as AgentSpecVllmModel
from pyagentspec.property import Property as AgentSpecProperty
from pyagentspec.property import _empty_default as _agentspec_empty_default
from pyagentspec.tools import Tool as AgentSpecTool
from pyagentspec.tools.clienttool import ClientTool as AgentSpecClientTool
from pyagentspec.tools.remotetool import RemoteTool as AgentSpecRemoteTool
from pyagentspec.tools.servertool import ServerTool as AgentSpecServerTool


def _json_schema_type_to_python_annotation(json_schema: Dict[str, Any]) -> str:
    """Return a Python type annotation (as a string) for the given JSON schema.

    Union types ("anyOf" or a list under "type") are rendered as ``Union[...]``;
    unknown scalar types fall back to ``Any``.
    """
    if "anyOf" in json_schema:
        possible_types = set(
            _json_schema_type_to_python_annotation(inner_json_schema_type)
            for inner_json_schema_type in json_schema["anyOf"]
        )
        return f"Union[{','.join(possible_types)}]"
    if isinstance(json_schema["type"], list):
        # Entries of a "type" list are plain type-name strings (e.g. "string"),
        # so each one must be re-wrapped in a schema dict before recursing
        possible_types = set(
            _json_schema_type_to_python_annotation({"type": inner_json_schema_type})
            for inner_json_schema_type in json_schema["type"]
        )
        return f"Union[{','.join(possible_types)}]"
    mapping = {
        "string": "str",
        "number": "float",
        "integer": "int",
        "boolean": "bool",
        "null": "None",
    }
    if json_schema["type"] == "object":
        # We could do better in inferring the type of values, for now we just use Any
        return "Dict[str, Any]"
    if json_schema["type"] == "array":
        return f"List[{_json_schema_type_to_python_annotation(json_schema['items'])}]"
    return mapping.get(json_schema["type"], "Any")


def _create_pydantic_model_from_properties(
    model_name: str, properties: List[AgentSpecProperty]
) -> type[BaseModel]:
    """Create a Pydantic model CLASS whose attributes are the given properties."""
    fields: Dict[str, Any] = {}
    for property_ in properties:
        field_parameters: Dict[str, Any] = {}
        param_name = property_.title
        if property_.default is not _agentspec_empty_default:
            field_parameters["default"] = property_.default
        if property_.description:
            field_parameters["description"] = property_.description
        annotation = _json_schema_type_to_python_annotation(property_.json_schema)
        fields[param_name] = (annotation, Field(**field_parameters))
    return create_model(model_name, **fields)


class AgentSpecToCrewAIConverter:
    """Converts PyAgentSpec components (agents, LLM configs, tools) into CrewAI objects."""

    def __init__(self, enable_agentspec_tracing: bool = True) -> None:
        self.enable_agentspec_tracing = enable_agentspec_tracing
        # True only while no conversion is in progress; used to detect the root call
        self._is_root_call: bool = True
        # Maps id(crewai_object) -> source AgentSpec component, used to attach tracing
        self._obj_id_to_agentspec_component: Dict[int, AgentSpecComponent] = {}

    def convert(
        self,
        agentspec_component: AgentSpecComponent,
        tool_registry: Dict[str, CrewAIServerToolType],
        converted_components: Optional[Dict[str, Any]] = None,
    ) -> Any:
        """Convert the given PyAgentSpec component object into the corresponding CrewAI component"""
        if converted_components is None:
            converted_components = {}

        if self._is_root_call:
            # Reset the obj id -> agentspec component mapping
            self._obj_id_to_agentspec_component = {}

        is_root_call = self._is_root_call
        self._is_root_call = False

        if agentspec_component.id not in converted_components:
            # If we did not find the object, we create it, and we record it in the
            # converted_components registry
            crewai_component: Any
            if isinstance(agentspec_component, AgentSpecLlmConfig):
                crewai_component = self._llm_convert_to_crewai(
                    agentspec_component, tool_registry, converted_components
                )
            elif isinstance(agentspec_component, AgentSpecAgent):
                crewai_component = self._agent_convert_to_crewai(
                    agentspec_component, tool_registry, converted_components
                )
            elif isinstance(agentspec_component, AgentSpecTool):
                crewai_component = self._tool_convert_to_crewai(
                    agentspec_component, tool_registry, converted_components
                )
            elif isinstance(agentspec_component, AgentSpecComponent):
                raise NotImplementedError(
                    f"The AgentSpec Component type '{agentspec_component.__class__.__name__}' is not yet supported "
                    f"for conversion. Please contact the AgentSpec team."
                )
            else:
                raise TypeError(
                    f"Expected object of type 'pyagentspec.component.Component',"
                    f" but got {type(agentspec_component)} instead"
                )
            converted_components[agentspec_component.id] = crewai_component

        converted_crewai_component = converted_components[agentspec_component.id]
        self._obj_id_to_agentspec_component[id(converted_crewai_component)] = agentspec_component

        if (
            is_root_call
            and self.enable_agentspec_tracing
            and isinstance(converted_crewai_component, CrewAIAgentWithTracing)
        ):
            # If the root component is an agent to which we can attach an agent spec listener,
            # we monkey patch the root CrewAI component to attach the event listener for Agent Spec
            from pyagentspec.adapters.crewai.tracing import AgentSpecEventListener

            converted_crewai_component._agentspec_event_listener = AgentSpecEventListener(
                agentspec_components=self._obj_id_to_agentspec_component
            )

        self._is_root_call = is_root_call
        return converted_crewai_component

    def _llm_convert_to_crewai(
        self,
        agentspec_llm: AgentSpecLlmConfig,
        tool_registry: Dict[str, CrewAIServerToolType],
        converted_components: Optional[Dict[str, Any]] = None,
    ) -> CrewAILlm:
        """Convert an AgentSpec LLM config into a CrewAI (LiteLLM-backed) LLM object."""

        def parse_url(url: str) -> str:
            # Normalize the endpoint URL: keep full "/completions" endpoints as-is,
            # otherwise append the "/v1" API suffix and prepend a scheme when missing
            url = url.strip()
            if url.endswith("/completions"):
                return url
            if not url.endswith("/v1") and not url.endswith("/litellm"):
                url += "/v1"
            if not url.startswith("http"):
                url = "http://" + url
            return url

        llm_parameters: Dict[str, Any] = {}
        if isinstance(agentspec_llm, AgentSpecOpenAiConfig):
            llm_parameters["model"] = "openai/" + agentspec_llm.model_id
        elif isinstance(agentspec_llm, AgentSpecVllmModel):
            # CrewAI uses lite llm underneath:
            # https://community.crewai.com/t/help-how-to-use-a-custom-local-llm-with-vllm/5746
            llm_parameters["model"] = "hosted_vllm/" + agentspec_llm.model_id
            llm_parameters["api_base"] = parse_url(agentspec_llm.url)
        elif isinstance(agentspec_llm, AgentSpecOpenAiCompatibleConfig):
            llm_parameters["model"] = "openai/" + agentspec_llm.model_id
            llm_parameters["api_base"] = parse_url(agentspec_llm.url)
        elif isinstance(agentspec_llm, AgentSpecOllamaModel):
            llm_parameters["model"] = "ollama/" + agentspec_llm.model_id
            llm_parameters["base_url"] = parse_url(agentspec_llm.url)
        else:
            raise NotImplementedError(
                f"The LLM config type '{type(agentspec_llm).__name__}' is not yet supported "
                f"for conversion to CrewAI"
            )

        generation_config = agentspec_llm.default_generation_parameters
        if generation_config is not None:
            # Only forward values that were explicitly configured, so that CrewAI
            # defaults are not overridden with None
            for parameter_name in ("top_p", "temperature", "max_tokens"):
                parameter_value = getattr(generation_config, parameter_name)
                if parameter_value is not None:
                    llm_parameters[parameter_name] = parameter_value

        return CrewAILlm(**llm_parameters)

    def _tool_convert_to_crewai(
        self,
        agentspec_tool: AgentSpecTool,
        tool_registry: Dict[str, CrewAIServerToolType],
        converted_components: Optional[Dict[str, Any]] = None,
    ) -> CrewAIBaseTool:
        """Convert an AgentSpec tool into a CrewAI tool.

        A registry entry with a matching name always takes precedence; otherwise client
        and remote tools are synthesized, while server tools require a registry entry.
        """
        if agentspec_tool.name in tool_registry:
            tool = tool_registry[agentspec_tool.name]
            if isinstance(tool, CrewAITool):
                return tool
            elif callable(tool):
                return CrewAITool(
                    name=agentspec_tool.name,
                    description=agentspec_tool.description or "",
                    args_schema=_create_pydantic_model_from_properties(
                        agentspec_tool.name.title() + "InputSchema", agentspec_tool.inputs or []
                    ),
                    func=tool,
                )
            else:
                raise ValueError(
                    f"Unsupported type of ServerTool `{agentspec_tool.name}`: {type(tool)}"
                )
        if isinstance(agentspec_tool, AgentSpecServerTool):
            raise ValueError(
                f"The implementation of the ServerTool `{agentspec_tool.name}` "
                f"must be provided in the tool registry"
            )
        elif isinstance(agentspec_tool, AgentSpecClientTool):

            # Interactive stand-in for a client tool: the tool request is printed and
            # the result is read back from standard input
            def client_tool(**kwargs: Any) -> Any:
                tool_request = {
                    "type": "client_tool_request",
                    "name": agentspec_tool.name,
                    "description": agentspec_tool.description,
                    "inputs": kwargs,
                }
                response = input(f"{tool_request} -> ")
                return response

            client_tool.__name__ = agentspec_tool.name
            client_tool.__doc__ = agentspec_tool.description
            return CrewAITool(
                name=agentspec_tool.name,
                description=agentspec_tool.description or "",
                args_schema=_create_pydantic_model_from_properties(
                    agentspec_tool.name.title() + "InputSchema", agentspec_tool.inputs or []
                ),
                func=client_tool,
            )
        elif isinstance(agentspec_tool, AgentSpecRemoteTool):
            return self._remote_tool_convert_to_crewai(agentspec_tool)
        raise ValueError(
            f"Tools of type {type(agentspec_tool)} are not yet supported for translation to CrewAI"
        )

    def _remote_tool_convert_to_crewai(self, remote_tool: AgentSpecRemoteTool) -> CrewAIBaseTool:
        """Wrap an AgentSpec remote tool into a CrewAI tool backed by the shared remote caller."""
        _remote_tool = _create_remote_tool_func(remote_tool)
        _remote_tool.__name__ = remote_tool.name
        _remote_tool.__doc__ = remote_tool.description
        return CrewAITool(
            name=remote_tool.name,
            description=remote_tool.description or "",
            args_schema=_create_pydantic_model_from_properties(
                remote_tool.name.title() + "InputSchema", remote_tool.inputs or []
            ),
            func=_remote_tool,
        )

    def _agent_convert_to_crewai(
        self,
        agentspec_agent: AgentSpecAgent,
        tool_registry: Dict[str, CrewAIServerToolType],
        converted_components: Optional[Dict[str, Any]] = None,
    ) -> CrewAIAgent:
        """Convert an AgentSpec agent into a tracing-enabled CrewAI agent."""
        crewai_agent = CrewAIAgentWithTracing(
            # We interpret the name as the `role` of the agent in CrewAI,
            # the description as the `backstory`, and the system prompt as the `goal`, as they are all required
            # This interpretation comes from the analysis of CrewAI Agent definition examples
            role=agentspec_agent.name,
            goal=agentspec_agent.system_prompt,
            backstory=agentspec_agent.description or "",
            llm=self.convert(
                agentspec_agent.llm_config,
                tool_registry=tool_registry,
                converted_components=converted_components,
            ),
            tools=[
                self.convert(
                    tool, tool_registry=tool_registry, converted_components=converted_components
                )
                # Guard against a missing tools list on the agent
                for tool in (agentspec_agent.tools or [])
            ],
        )
        # Record the CrewAI-side id in the AgentSpec metadata so tracing can correlate events
        if not agentspec_agent.metadata:
            agentspec_agent.metadata = {}
        agentspec_agent.metadata["__crewai_agent_id__"] = str(crewai_agent.id)
        return crewai_agent
# Copyright © 2025 Oracle and/or its affiliates.
#
# This software is under the Apache License 2.0
# (LICENSE-APACHE or http://www.apache.org/licenses/LICENSE-2.0) or Universal Permissive License
# (UPL) 1.0 (LICENSE-UPL or https://oss.oracle.com/licenses/upl), at your option.

from typing import TYPE_CHECKING, Any, Callable, Union

from pyagentspec._lazy_loader import LazyLoader

if TYPE_CHECKING:
    # Important: do not move this import out of the TYPE_CHECKING block so long as crewai is an
    # optional dependency. Otherwise, importing the module when crewai is not installed would
    # lead to an import error.

    import crewai
    from crewai import LLM as CrewAILlm
    from crewai import Agent as CrewAIAgent
    from crewai import Flow as CrewAIFlow
    from crewai.events.base_event_listener import BaseEventListener as CrewAIBaseEventListener
    from crewai.events.base_events import BaseEvent as CrewAIBaseEvent
    from crewai.events.event_bus import CrewAIEventsBus, crewai_event_bus
    from crewai.events.types.agent_events import (
        AgentExecutionCompletedEvent as CrewAIAgentExecutionCompletedEvent,
    )
    from crewai.events.types.agent_events import (
        AgentExecutionStartedEvent as CrewAIAgentExecutionStartedEvent,
    )
    from crewai.events.types.agent_events import (
        LiteAgentExecutionCompletedEvent as CrewAILiteAgentExecutionCompletedEvent,
    )
    from crewai.events.types.agent_events import (
        LiteAgentExecutionStartedEvent as CrewAILiteAgentExecutionStartedEvent,
    )
    from crewai.events.types.llm_events import LLMCallCompletedEvent as CrewAILLMCallCompletedEvent
    from crewai.events.types.llm_events import LLMCallStartedEvent as CrewAILLMCallStartedEvent
    from crewai.events.types.llm_events import LLMStreamChunkEvent as CrewAILLMStreamChunkEvent
    from crewai.events.types.tool_usage_events import (
        ToolUsageFinishedEvent as CrewAIToolUsageFinishedEvent,
    )
    from crewai.events.types.tool_usage_events import (
        ToolUsageStartedEvent as CrewAIToolUsageStartedEvent,
    )
    from crewai.tools import BaseTool as CrewAIBaseTool
    from crewai.tools.base_tool import Tool as CrewAITool
    from crewai.tools.structured_tool import CrewStructuredTool as CrewAIStructuredTool
else:
    crewai = LazyLoader("crewai")
    # We need to import the classes this way because it's the only one accepted by the lazy loader
    CrewAILlm = crewai.LLM
    CrewAIAgent = crewai.Agent
    CrewAIFlow = crewai.Flow
    CrewAIBaseTool = LazyLoader("crewai.tools").BaseTool
    CrewAITool = LazyLoader("crewai.tools.base_tool").Tool
    CrewAIStructuredTool = LazyLoader("crewai.tools.structured_tool").CrewStructuredTool
    CrewAIBaseEventListener = LazyLoader("crewai.events.base_event_listener").BaseEventListener
    CrewAIEventsBus = LazyLoader("crewai.events.event_bus").CrewAIEventsBus
    crewai_event_bus = LazyLoader("crewai.events.event_bus").crewai_event_bus
    CrewAIAgentExecutionStartedEvent = LazyLoader(
        "crewai.events.types.agent_events"
    ).AgentExecutionStartedEvent
    CrewAIAgentExecutionCompletedEvent = LazyLoader(
        "crewai.events.types.agent_events"
    ).AgentExecutionCompletedEvent
    CrewAILiteAgentExecutionStartedEvent = LazyLoader(
        "crewai.events.types.agent_events"
    ).LiteAgentExecutionStartedEvent
    CrewAILiteAgentExecutionCompletedEvent = LazyLoader(
        "crewai.events.types.agent_events"
    ).LiteAgentExecutionCompletedEvent
    CrewAILLMCallCompletedEvent = LazyLoader("crewai.events.types.llm_events").LLMCallCompletedEvent
    CrewAIBaseEvent = LazyLoader("crewai.events.base_events").BaseEvent
    CrewAILLMCallStartedEvent = LazyLoader("crewai.events.types.llm_events").LLMCallStartedEvent
    CrewAILLMStreamChunkEvent = LazyLoader("crewai.events.types.llm_events").LLMStreamChunkEvent
    CrewAIToolUsageFinishedEvent = LazyLoader(
        "crewai.events.types.tool_usage_events"
    ).ToolUsageFinishedEvent
    CrewAIToolUsageStartedEvent = LazyLoader(
        "crewai.events.types.tool_usage_events"
    ).ToolUsageStartedEvent

# Type aliases used across the CrewAI adapter
CrewAIComponent = Union[CrewAIAgent, CrewAIFlow[Any]]
CrewAIServerToolType = Union[CrewAITool, Callable[..., Any]]

__all__ = [
    "crewai",
    "crewai_event_bus",
    "CrewAILlm",
    "CrewAIAgent",
    "CrewAIFlow",
    "CrewAIBaseTool",
    "CrewAITool",
    "CrewAIStructuredTool",
    "CrewAIComponent",
    "CrewAIServerToolType",
    "CrewAIBaseEvent",
    "CrewAIBaseEventListener",
    "CrewAILLMCallCompletedEvent",
    "CrewAILLMCallStartedEvent",
    "CrewAILLMStreamChunkEvent",
    "CrewAIToolUsageStartedEvent",
    "CrewAIToolUsageFinishedEvent",
    "CrewAIEventsBus",
    "CrewAIAgentExecutionStartedEvent",
    "CrewAIAgentExecutionCompletedEvent",
    "CrewAILiteAgentExecutionStartedEvent",
    "CrewAILiteAgentExecutionCompletedEvent",
]
# Copyright © 2025 Oracle and/or its affiliates.
#
# This software is under the Apache License 2.0
# (LICENSE-APACHE or http://www.apache.org/licenses/LICENSE-2.0) or Universal Permissive License
# (UPL) 1.0 (LICENSE-UPL or https://oss.oracle.com/licenses/upl), at your option.

from typing import Dict, List, Optional, cast

from pyagentspec.adapters.crewai._agentspecconverter import CrewAIToAgentSpecConverter
from pyagentspec.adapters.crewai._crewaiconverter import AgentSpecToCrewAIConverter
from pyagentspec.adapters.crewai._types import (
    CrewAIAgent,
    CrewAIComponent,
    CrewAIFlow,
    CrewAIServerToolType,
)
from pyagentspec.component import Component
from pyagentspec.component import Component as AgentSpecComponent
from pyagentspec.serialization import (
    AgentSpecDeserializer,
    ComponentDeserializationPlugin,
)
from pyagentspec.serialization import AgentSpecSerializer as PyAgentSpecSerializer


class AgentSpecExporter:
    """Helper class to convert CrewAI objects to Agent Spec configurations."""

    def to_yaml(self, crewai_component: CrewAIComponent) -> str:
        """
        Transform the given CrewAI component into the respective Agent Spec YAML representation.

        Parameters
        ----------

        crewai_component:
            CrewAI Component to serialize to an Agent Spec configuration.
        """
        agentspec_component = self.to_component(crewai_component)
        return PyAgentSpecSerializer().to_yaml(agentspec_component)

    def to_json(self, crewai_component: CrewAIComponent) -> str:
        """
        Transform the given CrewAI component into the respective Agent Spec JSON representation.

        Parameters
        ----------

        crewai_component:
            CrewAI Component to serialize to an Agent Spec configuration.
        """
        agentspec_component = self.to_component(crewai_component)
        return PyAgentSpecSerializer().to_json(agentspec_component)

    def to_component(self, crewai_component: CrewAIComponent) -> Component:
        """
        Transform the given CrewAI component into the respective PyAgentSpec Component.

        Parameters
        ----------

        crewai_component:
            CrewAI Component to serialize to a corresponding PyAgentSpec Component.
        """
        if not isinstance(crewai_component, (CrewAIAgent, CrewAIFlow)):
            raise TypeError(
                f"Expected an Agent or Flow, but got '{type(crewai_component)}' instead"
            )
        return CrewAIToAgentSpecConverter().convert(crewai_component)


class AgentSpecLoader:
    """Helper class to convert Agent Spec configurations to CrewAI objects."""

    def __init__(
        self,
        tool_registry: Optional[Dict[str, CrewAIServerToolType]] = None,
        plugins: Optional[List[ComponentDeserializationPlugin]] = None,
    ):
        """
        Parameters
        ----------

        tool_registry:
            Optional dictionary to enable converting/loading assistant configurations involving the
            use of tools. Keys must be the tool names as specified in the serialized configuration, and
            the values are the tool objects.
        plugins:
            Optional list of plugins to enable converting/loading assistant configurations involving
            non-core Agent Spec components.
        """
        self.tool_registry = tool_registry or {}
        self.plugins = plugins
        self._enable_agentspec_tracing = True

    def load_yaml(self, serialized_assistant: str) -> CrewAIComponent:
        """
        Transform the given Agent Spec YAML representation into the respective CrewAI Component

        Parameters
        ----------

        serialized_assistant:
            Serialized Agent Spec configuration to be converted to a CrewAI Component.
        """
        agentspec_assistant = AgentSpecDeserializer(plugins=self.plugins).from_yaml(
            serialized_assistant
        )
        return self.load_component(agentspec_assistant)

    def load_json(self, serialized_assistant: str) -> CrewAIComponent:
        """
        Transform the given Agent Spec JSON representation into the respective CrewAI Component

        Parameters
        ----------

        serialized_assistant:
            Serialized Agent Spec configuration to be converted to a CrewAI Component.
        """
        agentspec_assistant = AgentSpecDeserializer(plugins=self.plugins).from_json(
            serialized_assistant
        )
        return self.load_component(agentspec_assistant)

    def load_component(self, agentspec_component: AgentSpecComponent) -> CrewAIComponent:
        """
        Transform the given PyAgentSpec Component into the respective CrewAI Component

        Parameters
        ----------

        agentspec_component:
            PyAgentSpec Component to be converted to a CrewAI Component.
        """
        return cast(
            CrewAIComponent,
            AgentSpecToCrewAIConverter(
                enable_agentspec_tracing=self._enable_agentspec_tracing,
            ).convert(agentspec_component, self.tool_registry),
        )
+# +# This software is under the Apache License 2.0 +# (LICENSE-APACHE or http://www.apache.org/licenses/LICENSE-2.0) or Universal Permissive License +# (UPL) 1.0 (LICENSE-UPL or https://oss.oracle.com/licenses/upl), at your option. + +import json +import threading +import time +import uuid +from contextlib import contextmanager +from typing import Any, Dict, Generator, List, Optional, Type, cast + +from pydantic import PrivateAttr + +from pyagentspec import Agent as AgentSpecAgent +from pyagentspec import Component as AgentSpecComponent +from pyagentspec.adapters.crewai._types import ( + CrewAIAgent, + CrewAIAgentExecutionCompletedEvent, + CrewAIAgentExecutionStartedEvent, + CrewAIBaseEvent, + CrewAIBaseEventListener, + CrewAIEventsBus, + CrewAILiteAgentExecutionCompletedEvent, + CrewAILiteAgentExecutionStartedEvent, + CrewAILLMCallCompletedEvent, + CrewAILLMCallStartedEvent, + CrewAILLMStreamChunkEvent, + CrewAIToolUsageFinishedEvent, + CrewAIToolUsageStartedEvent, +) +from pyagentspec.llms import LlmConfig as AgentSpecLlmConfig +from pyagentspec.llms import OpenAiCompatibleConfig as AgentSpecOpenAiCompatibleConfig +from pyagentspec.llms import OpenAiConfig as AgentSpecOpenAiConfig +from pyagentspec.tools import Tool as AgentSpecTool +from pyagentspec.tracing.events import ( + AgentExecutionEnd, + AgentExecutionStart, + LlmGenerationChunkReceived, + LlmGenerationRequest, + LlmGenerationResponse, + ToolExecutionRequest, + ToolExecutionResponse, +) +from pyagentspec.tracing.messages.message import Message as AgentSpecMessage +from pyagentspec.tracing.spans import AgentExecutionSpan, LlmGenerationSpan, Span, ToolExecutionSpan +from pyagentspec.tracing.spans.span import ( + _ACTIVE_SPAN_STACK, + get_active_span_stack, + get_current_span, +) + + +def _get_closest_span_of_given_type(agentspec_span_type: Type[Span]) -> Optional[Span]: + return next( + (span for span in get_active_span_stack()[::-1] if isinstance(span, agentspec_span_type)), + None, + ) + + +def 
_ensure_dict(obj: Any) -> Dict[str, Any]: + """Ensure that an object is a dict, if it is not, transform it into one.""" + if isinstance(obj, dict): + return obj + if isinstance(obj, str): + stripped = obj.strip() + if stripped.startswith("{") or stripped.startswith("["): + try: + parsed = json.loads(stripped) + if isinstance(parsed, list): + return {"value": parsed} + if isinstance(parsed, dict): + return parsed + except Exception: + return {"value": obj} + return {"value": obj} + return {"value": str(obj)} + + +class AgentSpecEventListener: + + def __init__(self, agentspec_components: Dict[int, AgentSpecComponent]) -> None: + super().__init__() + self.agentspec_components = agentspec_components + self._event_listener: Optional[_CrewAiEventListener] = None + self.scoped_handlers_context_generator: Optional[Generator[None, Any, None]] = None + self._events_flush_timeout: float = 2.0 + + @contextmanager + def record_listener(self) -> Generator[None, Any, None]: + from crewai.events import crewai_event_bus + + with crewai_event_bus.scoped_handlers(): + self._event_listener = _CrewAiEventListener(self.agentspec_components) + yield + # Before getting out, we ensure that the events have all been handled + # We first wait a little to make the handlers start before we continue with this code + time.sleep(0.1) + start_time = time.time() + while ( + len(self._event_listener._events_list) > 0 + and start_time + self._events_flush_timeout > time.time() + ): + time.sleep(0.1) + self._event_listener = None + + +class _CrewAiEventListener(CrewAIBaseEventListener): + """Bridges CrewAI streaming and tool events to Agent Spec Tracing""" + + def __init__(self, agentspec_components: Dict[int, AgentSpecComponent]) -> None: + super().__init__() + self.agentspec_components = agentspec_components + self.llm_configs_map: Dict[str, AgentSpecLlmConfig] = { + llm.model_id: llm + for llm in agentspec_components.values() + if isinstance(llm, (AgentSpecOpenAiConfig, 
AgentSpecOpenAiCompatibleConfig)) + } + self.tools_map: Dict[str, AgentSpecTool] = { + tool.name: tool + for tool in agentspec_components.values() + if isinstance(tool, AgentSpecTool) + } + self.agents_map: Dict[str, AgentSpecAgent] = { + (agent.metadata or {}).get("__crewai_agent_id__", str(agent_obj_id)): agent + for agent_obj_id, agent in agentspec_components.items() + if isinstance(agent, AgentSpecAgent) + } + # We keep a registry of conversions, so that we do not repeat the conversion for the same object twice + self.agentspec_spans_registry: Dict[str, Span] = {} + # Correlation helpers + self._agent_fingerprint_to_last_msg: Dict[str, str] = {} + # Track active tool execution spans by CrewAI agent_key + self._tool_span_by_agent_key: Dict[str, ToolExecutionSpan] = {} + # Track active agent execution spans by CrewAI agent_key + self._agent_span_by_agent_key: Dict[str, AgentExecutionSpan] = {} + # Per-agent_key tool_call_id and parent message id for correlation + self._tool_call_id_by_agent_key: Dict[str, str] = {} + self._parent_msg_by_agent_key: Dict[str, Optional[str]] = {} + # This is a reference to the parent span stack, it is needed because it must be shared + # when dealing with events, otherwise the changes to the stack performed in there, + # like span start or end, are not persisted + self._parent_context = _ACTIVE_SPAN_STACK.get() + # Events are raised and handled sometimes concurrently (especially end of previous span and start of new one), + # which makes it hard to handle the nested structure of spans + # See the `_add_event_and_handle_events_list` method for more information. 
+ # This lock is used to manage the event list with a single thread at a time + self._lock = threading.Lock() + # This list contains all the pending events that could not be handled properly yet + self._events_list: List[CrewAIBaseEvent] = [] + + def _get_agentspec_component_from_crewai_object(self, crewai_obj: Any) -> AgentSpecComponent: + return self.agentspec_components[id(crewai_obj)] + + @contextmanager + def _parent_span_stack(self) -> Generator[None, Any, None]: + """ + Context manager that sets the span stack of the root context in the current context. + It is used because events are handled in async "threads" that have a different context, + so changes to the span stack performed in there would not be persisted and propagated to the parent context. + This way we centralize the context in this object and propagate/persist the changes across all the event handlers. + """ + _ACTIVE_SPAN_STACK.set(self._parent_context) + yield + self._parent_context = _ACTIVE_SPAN_STACK.get() + + def _handle_event(self, event: CrewAIBaseEvent) -> bool: + """ + Deal with the occurrence of the given event. + Returns True if the event is properly handled, False if the event cannot be handled. 
+ """ + span: Span + match event: + case CrewAILiteAgentExecutionStartedEvent() | CrewAIAgentExecutionStartedEvent(): + if isinstance(event, CrewAILiteAgentExecutionStartedEvent): + agent_key = str(event.agent_info.get("id")) + else: + agent_key = str(event.agent.id) + agent = self.agents_map.get(agent_key) + if agent is None: + return False + span = AgentExecutionSpan(agent=agent) + span.start() + span.add_event(AgentExecutionStart(agent=agent, inputs={})) + self._agent_span_by_agent_key[agent_key] = span + return True + case CrewAILiteAgentExecutionCompletedEvent() | CrewAIAgentExecutionCompletedEvent(): + if not isinstance(get_current_span(), AgentExecutionSpan): + return False + if isinstance(event, CrewAILiteAgentExecutionCompletedEvent): + agent_key = str(event.agent_info.get("id")) + else: + agent_key = str(event.agent.id) + agent = self.agents_map.get(agent_key) + if agent is None: + return False + span = self._agent_span_by_agent_key[agent_key] + span.add_event( + AgentExecutionEnd( + agent=agent, + outputs={"output": event.output} if hasattr(event, "output") else {}, + ) + ) + span.end() + self._agent_span_by_agent_key.pop(agent_key, None) + return True + case CrewAILLMCallStartedEvent(): + if not isinstance(get_current_span(), AgentExecutionSpan): + return False + messages = event.messages or [] + if isinstance(messages, str): + messages = [{"content": messages}] + run_id = self._compute_chat_history_hash(messages) + model_id = self._sanitize_model_id(event.model or "") + # model_id should match an entry in the config map + llm_cfg = self.llm_configs_map.get(model_id) + if llm_cfg is None and "/" in model_id: + # Try last token as a fallback (provider differences) + llm_cfg = self.llm_configs_map.get(model_id.split("/")[-1]) + if llm_cfg is None: + raise RuntimeError( + f"Unable to find the Agent Spec LlmConfig during tracing: `{model_id}`" + ) + span = LlmGenerationSpan(id=run_id, llm_config=llm_cfg) + span.start() + span.add_event( + 
LlmGenerationRequest( + llm_config=span.llm_config, + llm_generation_config=span.llm_config.default_generation_parameters, + prompt=[ + AgentSpecMessage( + content=m["content"], + role=m["role"], + ) + for m in messages + ], + tools=list(self.tools_map.values()), + request_id=run_id, + ) + ) + self.agentspec_spans_registry[run_id] = span + return True + case CrewAILLMCallCompletedEvent(): + if not isinstance(get_current_span(), LlmGenerationSpan): + return False + messages = event.messages or [] + if isinstance(messages, str): + messages = [{"content": messages}] + run_id = self._compute_chat_history_hash(messages) + span = cast(LlmGenerationSpan, self.agentspec_spans_registry[run_id]) + span.add_event( + LlmGenerationResponse( + llm_config=span.llm_config, + completion_id=run_id, + content=event.response, + tool_calls=[], + request_id=run_id, + ) + ) + span.end() + self.agentspec_spans_registry.pop(run_id, None) + return True + case CrewAILLMStreamChunkEvent(): + current_span = _get_closest_span_of_given_type(LlmGenerationSpan) + if isinstance(current_span, LlmGenerationSpan): + current_span.add_event( + LlmGenerationChunkReceived( + llm_config=current_span.llm_config, + completion_id=current_span.id, + content=event.chunk, + tool_calls=[], + request_id=current_span.id, + ) + ) + return True + case CrewAIToolUsageStartedEvent(): + tool_name = event.tool_name + tool_args = event.tool_args + agent_key = event.agent_key or "" + # Correlate to current assistant message via agent fingerprint + parent_msg_id = None + if event.source_fingerprint: + parent_msg_id = self._agent_fingerprint_to_last_msg.get( + event.source_fingerprint + ) + + # Resolve tool object and create a ToolExecutionSpan + tool = self.tools_map.get(tool_name) + if tool is None: + return False + tool_span = ToolExecutionSpan(name=f"ToolExecution - {tool_name}", tool=tool) + tool_span.start() + self._tool_span_by_agent_key[agent_key] = tool_span + + # Ensure a tool_call_id for later correlation (no 
streaming support → always synthesize) + tool_call_id = str(uuid.uuid4()) + self._tool_call_id_by_agent_key[agent_key] = tool_call_id + self._parent_msg_by_agent_key[agent_key] = parent_msg_id + + inputs = _ensure_dict(tool_args) + tool_span.add_event( + ToolExecutionRequest( + tool=tool, + inputs=inputs, + request_id=tool_call_id, + ) + ) + return True + case CrewAIToolUsageFinishedEvent(): + if not isinstance(get_current_span(), ToolExecutionSpan): + return False + + outputs = event.output + agent_key = event.agent_key or "" + + tool_span = self._tool_span_by_agent_key[agent_key] + tool_call_id = self._tool_call_id_by_agent_key[agent_key] + if tool_span is None: + return False + + tool_span.add_event( + ToolExecutionResponse( + request_id=tool_call_id, + tool=tool_span.tool, + outputs=_ensure_dict(outputs), + ) + ) + tool_span.end() + + # Cleanup + self._tool_span_by_agent_key.pop(agent_key, None) + self._tool_call_id_by_agent_key.pop(agent_key, None) + self._parent_msg_by_agent_key.pop(agent_key, None) + + return True + return False + + def _add_event_and_handle_events_list(self, new_event: CrewAIBaseEvent) -> None: + """ + The goal of this method is to add the given event to the events list, and then try to handle + all the events in the _events_list. The reason why we need this is that the order in which some + events are emitted/handled in CrewAI is arbitrary. For example, the llm generation end and the consequent + agent execution end events are emitted at the same time, and since event handlers are executed concurrently, + there's no guarantee on the order in which those events are handled. From an Agent Spec Tracing perspective, + instead, we need to have a precise order in order to open and close spans properly, according to the span stack. + + In order to recreate this order manually, we adopt the following solution. + When an event is emitted by CrewAI, we simply add it to the list of events that should be handled. 
+ Then we try to handle all the events in the list. The idea is that: + - If an event cannot be handled (e.g., because it's not in the correct span), it stays in the events list. + This means that another event has to happen in order to unlock this event to be handled. When that event will happen, + it will unlock this event from being handled, and that will happen. + - If the event can be handled, it is handled and popped from the list. This event being handled might unlock another event, + that will be handled as well, and so on until no event can be handled anymore, or the events list is empty. + """ + with self._lock: + # We first add the new event to the list of events to be handled. + # We use the lock to avoid changing the list that is already being modified by some other event handling + self._events_list.append(new_event) + with self._lock: + # We now take the lock again and try to handle all the events we can + events_correctly_handled = 1 + while events_correctly_handled > 0 and len(self._events_list) > 0: + event_indices_to_remove = [] + # We go over the list of events that are waiting for being handled + for i, event in enumerate(self._events_list): + # We need to ensure that we are using the right span stack contextvar + with self._parent_span_stack(): + # The events that get correctly handled, will be removed from the list, the others stay + if self._handle_event(event): + event_indices_to_remove.append(i) + events_correctly_handled = len(event_indices_to_remove) + # Remove the handled events from the list + for offset, event_index in enumerate(sorted(event_indices_to_remove)): + self._events_list.pop(event_index - offset) + + def setup_listeners(self, crewai_event_bus: CrewAIEventsBus) -> None: + """Register handlers on the global CrewAI event bus.""" + + @crewai_event_bus.on(CrewAILiteAgentExecutionStartedEvent) + def on_lite_agent_execution_started( + source: Any, event: CrewAILiteAgentExecutionStartedEvent + ) -> None: + 
self._add_event_and_handle_events_list(event) + + @crewai_event_bus.on(CrewAILiteAgentExecutionCompletedEvent) + def on_lite_agent_execution_finished( + source: Any, event: CrewAILiteAgentExecutionCompletedEvent + ) -> None: + self._add_event_and_handle_events_list(event) + + @crewai_event_bus.on(CrewAIAgentExecutionStartedEvent) + def on_agent_execution_started( + source: Any, event: CrewAIAgentExecutionStartedEvent + ) -> None: + self._add_event_and_handle_events_list(event) + + @crewai_event_bus.on(CrewAIAgentExecutionCompletedEvent) + def on_agent_execution_finished( + source: Any, event: CrewAIAgentExecutionCompletedEvent + ) -> None: + self._add_event_and_handle_events_list(event) + + @crewai_event_bus.on(CrewAILLMCallStartedEvent) + def on_llm_call_started(source: Any, event: CrewAILLMCallStartedEvent) -> None: + self._add_event_and_handle_events_list(event) + + @crewai_event_bus.on(CrewAILLMCallCompletedEvent) + def on_llm_call_completed(source: Any, event: CrewAILLMCallCompletedEvent) -> None: + self._add_event_and_handle_events_list(event) + + @crewai_event_bus.on(CrewAILLMStreamChunkEvent) + def on_llm_call_chunk(source: Any, event: CrewAILLMStreamChunkEvent) -> None: + self._add_event_and_handle_events_list(event) + + @crewai_event_bus.on(CrewAIToolUsageStartedEvent) + def on_tool_usage_started(source: Any, event: CrewAIToolUsageStartedEvent) -> None: + self._add_event_and_handle_events_list(event) + + @crewai_event_bus.on(CrewAIToolUsageFinishedEvent) + def on_tool_usage_finished(source: Any, event: CrewAIToolUsageFinishedEvent) -> None: + self._add_event_and_handle_events_list(event) + + @staticmethod + def _sanitize_model_id(model_id: str) -> str: + model_parts = model_id.split("/") if model_id else [] + if len(model_parts) > 1: + # Since CrewAI relies on LiteLLM, it contains the model provider at the start of the model id + # That is removed in Agent Spec conversion, so we must remove it from here too + return "/".join(model_parts[1:]) + return 
model_id + + @staticmethod + def _compute_chat_history_hash(messages: List[Dict[str, Any]]) -> str: + """Compute a stable UUID based on the list of messages. + + We only allow messages with role/content fields and roles in + {system,user,assistant} to mirror the frontend inputs. + """ + normalized = [ + { + "role": m["role"], + "content": str(m["content"]).replace("\r\n", "\n").replace("\r", "\n"), + } + for m in messages + ] + payload = json.dumps(normalized, ensure_ascii=False, separators=(",", ":"), sort_keys=True) + return str(uuid.uuid5(uuid.NAMESPACE_URL, payload)) + + +class CrewAIAgentWithTracing(CrewAIAgent): + """Extension of the CrewAI agent that contains the event handler for Agent Spec Tracing""" + + _agentspec_event_listener: Optional[AgentSpecEventListener] = PrivateAttr(default=None) + + @contextmanager + def agentspec_event_listener(self) -> Generator[None, Any, None]: + """ + Context manager that yields the agent spec event listener. + + Example of usage: + + from pyagentspec.agent import Agent + + system_prompt = '''You are an expert in computer science. Please help the users with their requests.''' + agent = Agent( + name="Adaptive expert agent", + system_prompt=system_prompt, + llm_config=llm_config, + ) + + from pyagentspec.adapters.crewai import AgentSpecLoader + from pyagentspec.tracing.trace import Trace + + crewai_agent = AgentSpecLoader().load_component(agent) + with Trace(name="crewai_tracing_test"): + with crewai_agent.agentspec_event_listener(): + response = crewai_agent.kickoff(messages="Talk about the Dijkstra's algorithm") + + """ + if self._agentspec_event_listener is None: + raise RuntimeError( + "Called Agent Spec event listener context manager, but no instance was provided. " + "Please set the _agentspec_event_listener attribute first." 
+ ) + with self._agentspec_event_listener.record_listener(): + yield diff --git a/pyagentspec/tests/adapters/crewai/__init__.py b/pyagentspec/tests/adapters/crewai/__init__.py new file mode 100644 index 00000000..986e719d --- /dev/null +++ b/pyagentspec/tests/adapters/crewai/__init__.py @@ -0,0 +1,23 @@ +# Copyright © 2025 Oracle and/or its affiliates. +# +# This software is under the Apache License 2.0 +# (LICENSE-APACHE or http://www.apache.org/licenses/LICENSE-2.0) or Universal Permissive License +# (UPL) 1.0 (LICENSE-UPL or https://oss.oracle.com/licenses/upl), at your option. + +try: + import crewai +except ImportError: + # This means that crewai is not installed. This case is already managed by the lazy loader in the public modules + pass +except RuntimeError as e: + # ChromaDB requires a relatively new version of SQLite which is not always supported + # If the import fails because of that, we try to override the sqlite version with the python version of it + # If even that is not available, we fail in the end + if "Your system has an unsupported version of sqlite3" in str(e): + __import__("pysqlite3") + import sys + + sys.modules["sqlite3"] = sys.modules.pop("pysqlite3") + else: + # This is another runtime error, we raise it normally + raise e diff --git a/pyagentspec/tests/adapters/crewai/configs/weather_agent_remote_tool.yaml b/pyagentspec/tests/adapters/crewai/configs/weather_agent_remote_tool.yaml new file mode 100644 index 00000000..1f5c3273 --- /dev/null +++ b/pyagentspec/tests/adapters/crewai/configs/weather_agent_remote_tool.yaml @@ -0,0 +1,44 @@ +# Copyright © 2025 Oracle and/or its affiliates. +# +# This software is under the Apache License 2.0 +# (LICENSE-APACHE or http://www.apache.org/licenses/LICENSE-2.0) or Universal Permissive License +# (UPL) 1.0 (LICENSE-UPL or https://oss.oracle.com/licenses/upl), at your option. 
+ +component_type: Agent +description: Weather agent +id: 6135866f-7fbe-4874-8a84-4b2e021aa721 +inputs: [] +llm_config: + component_type: VllmConfig + default_generation_parameters: {} + description: "" + id: fde915e6-4f21-4e5a-a0a5-d6121c2655f6 + metadata: + __metadata_info__: {} + model_id: /storage/models/Llama-3.3-70B-Instruct + name: Llama-3.3-70B-Instruct + url: [[LLAMA70BV33_API_URL]] +metadata: + __metadata_info__: {} +name: agent_87040c07 +outputs: [] +system_prompt: You are a weather agent. Use your tool to answer user questions about weather in particular cities. +tools: +- component_type: RemoteTool + description: Retrieves the weather in a city + headers: {} + http_method: GET + id: 7767612c-8326-4afd-a037-c1360589c202 + inputs: + - description: The city to get the weather for + title: city + type: string + metadata: {} + name: get_weather + outputs: + - description: The weather in the city + title: weather + type: string + query_params: {} + url: "[[remote_tools_server]]/api/weather/{{city}}" +agentspec_version: "25.4.1" diff --git a/pyagentspec/tests/adapters/crewai/conftest.py b/pyagentspec/tests/adapters/crewai/conftest.py new file mode 100644 index 00000000..75d086a1 --- /dev/null +++ b/pyagentspec/tests/adapters/crewai/conftest.py @@ -0,0 +1,56 @@ +# Copyright © 2025 Oracle and/or its affiliates. +# +# This software is under the Apache License 2.0 +# (LICENSE-APACHE or http://www.apache.org/licenses/LICENSE-2.0) or Universal Permissive License +# (UPL) 1.0 (LICENSE-UPL or https://oss.oracle.com/licenses/upl), at your option. 
+ +import json +import os +from pathlib import Path +from typing import Any +from urllib.parse import urljoin + +import pytest +import requests + +from ..conftest import skip_tests_if_dependency_not_installed + + +def pytest_collection_modifyitems(config: Any, items: Any): + # We skip all the tests in this folder if crewai is not installed + skip_tests_if_dependency_not_installed( + module_name="crewai", + directory=Path(__file__).parent, + items=items, + ) + + +@pytest.fixture(scope="package", autouse=True) +def _disable_tracing(): + """Disable the automatic tracing of crewai""" + old_value = os.environ.get("CREWAI_DISABLE_TELEMETRY", None) + os.environ["CREWAI_DISABLE_TELEMETRY"] = "true" + try: + yield + finally: + if old_value is not None: + os.environ["CREWAI_DISABLE_TELEMETRY"] = old_value + + +@pytest.fixture(autouse=True) +def no_network_plusapi(monkeypatch): + try: + from crewai.cli.plus_api import PlusAPI + + def fake_response(self, method: str, endpoint: str, **kwargs) -> requests.Response: + resp = requests.Response() + resp.status_code = 200 + resp.url = urljoin(self.base_url, endpoint) + resp.headers["Content-Type"] = "application/json" + resp._content = json.dumps({"ok": True}).encode("utf-8") + resp.encoding = "utf-8" + return resp + + monkeypatch.setattr(PlusAPI, "_make_request", fake_response, raising=True) + except ImportError: + pass diff --git a/pyagentspec/tests/adapters/crewai/test_agentspec_to_crewai.py b/pyagentspec/tests/adapters/crewai/test_agentspec_to_crewai.py new file mode 100644 index 00000000..0282214f --- /dev/null +++ b/pyagentspec/tests/adapters/crewai/test_agentspec_to_crewai.py @@ -0,0 +1,32 @@ +# Copyright © 2025 Oracle and/or its affiliates. +# +# This software is under the Apache License 2.0 +# (LICENSE-APACHE or http://www.apache.org/licenses/LICENSE-2.0) or Universal Permissive License +# (UPL) 1.0 (LICENSE-UPL or https://oss.oracle.com/licenses/upl), at your option. 
+ +from pathlib import Path + +from ..conftest import _replace_config_placeholders + +CONFIGS = Path(__file__).parent / "configs" + + +def test_remote_tool(json_server: str) -> None: + + from pyagentspec.adapters.crewai import AgentSpecLoader + from pyagentspec.adapters.crewai._types import crewai + + yaml_content = (CONFIGS / "weather_agent_remote_tool.yaml").read_text() + final_yaml = _replace_config_placeholders(yaml_content, json_server) + weather_agent = AgentSpecLoader().load_yaml(final_yaml) + + task = crewai.Task( + description="Use your tool to answer this simple request from the user: {user_input}", + expected_output="A helpful, concise reply to the user.", + agent=weather_agent, + ) + crew = crewai.Crew(agents=[weather_agent], tasks=[task], verbose=False) + response = crew.kickoff(inputs={"user_input": "What's the weather in Agadir?"}) + assert all(x in str(response) for x in ("Agadir", "sunny")) or all( + x in str(response) for x in ("agadir", "sunny") + ) diff --git a/pyagentspec/tests/adapters/crewai/test_converter.py b/pyagentspec/tests/adapters/crewai/test_converter.py new file mode 100644 index 00000000..f8203d71 --- /dev/null +++ b/pyagentspec/tests/adapters/crewai/test_converter.py @@ -0,0 +1,136 @@ +# Copyright © 2025 Oracle and/or its affiliates. +# +# This software is under the Apache License 2.0 +# (LICENSE-APACHE or http://www.apache.org/licenses/LICENSE-2.0) or Universal Permissive License +# (UPL) 1.0 (LICENSE-UPL or https://oss.oracle.com/licenses/upl), at your option. 
+import pytest +import yaml +from pydantic import BaseModel + +from pyagentspec import Agent +from pyagentspec.llms import ( + LlmConfig, + LlmGenerationConfig, + OllamaConfig, + OpenAiCompatibleConfig, + OpenAiConfig, +) +from pyagentspec.property import StringProperty +from pyagentspec.serialization import AgentSpecSerializer +from pyagentspec.tools import ClientTool, RemoteTool, ServerTool + +# mypy: ignore-errors + + +def mock_tool() -> str: + return "CrewAI is a framework for building multi-agent applications." + + +def test_crewai_agent_can_be_converted_to_agentspec() -> None: + + from pyagentspec.adapters.crewai import AgentSpecExporter + from pyagentspec.adapters.crewai._types import CrewAITool, crewai + + class MockToolSchema(BaseModel): + pass + + crewai_mock_tool = CrewAITool( + name="mock_tool", + description="Mocked tool", + args_schema=MockToolSchema, + func=mock_tool, + ) + + agent = crewai.Agent( + role="crew_ai_assistant", + goal="Use tools to solve tasks.", + backstory="You are a helpful assistant", + llm=crewai.LLM( + model="ollama/agi_model", + base_url="url_to_my_agi_model", + max_tokens=200, + ), + tools=[crewai_mock_tool], + ) + + exporter = AgentSpecExporter() + agentspec_yaml = exporter.to_yaml(agent) + agentspec_dict = yaml.safe_load(agentspec_yaml) + assert "component_type" in agentspec_dict + assert agentspec_dict["component_type"] == "Agent" + assert agentspec_dict["name"] == "crew_ai_assistant" + assert agentspec_dict["system_prompt"] == "Use tools to solve tasks." 
+ # Check LLM + assert "llm_config" in agentspec_dict + assert "component_type" in agentspec_dict["llm_config"] + assert agentspec_dict["llm_config"]["component_type"] == "OllamaConfig" + # Check Tools + assert "tools" in agentspec_dict + assert isinstance(agentspec_dict["tools"], list) + assert len(agentspec_dict["tools"]) == 1 + assert "component_type" in agentspec_dict["tools"][0] + assert agentspec_dict["tools"][0]["component_type"] == "ServerTool" + assert agentspec_dict["tools"][0]["name"] == "mock_tool" + + +@pytest.mark.parametrize( + "llm_config", + [ + OllamaConfig( + name="agi_model", + model_id="agi_model", + url="url_to_my_agi_model", + default_generation_parameters=LlmGenerationConfig(max_tokens=200), + ), + OpenAiCompatibleConfig( + name="agi_model", + model_id="agi_model", + url="url_to_my_agi_model", + default_generation_parameters=LlmGenerationConfig(temperature=200), + ), + OpenAiConfig( + name="agi_model", + model_id="agi_model", + default_generation_parameters=LlmGenerationConfig(top_p=0.3), + ), + ], +) +def test_agentspec_agent_can_be_converted_to_crewai(llm_config: LlmConfig) -> None: + from pyagentspec.adapters.crewai import AgentSpecLoader + from pyagentspec.adapters.crewai._types import crewai + + agent = Agent( + name="crew_ai_assistant", + description="You are a helpful assistant", + llm_config=llm_config, + tools=[ + ServerTool( + name="mock_tool_server", inputs=[], outputs=[StringProperty(title="output")] + ), + ClientTool( + name="mock_tool_client", + inputs=[StringProperty(title="input_2")], + outputs=[StringProperty(title="output_2")], + ), + RemoteTool( + name="mock_tool_remote", + url="my.remote.server", + http_method="GET", + data={"in": "{{input_3}}"}, + inputs=[StringProperty(title="input_3")], + outputs=[StringProperty(title="output_3")], + ), + ], + system_prompt="Use tools to solve tasks.", + ) + agentspec_yaml = AgentSpecSerializer().to_yaml(agent) + + crewai_assistant = AgentSpecLoader(tool_registry={"mock_tool_server": 
mock_tool}).load_yaml( + agentspec_yaml + ) + assert isinstance(crewai_assistant, crewai.Agent) + assert crewai_assistant.role == "crew_ai_assistant" + assert crewai_assistant.goal == "Use tools to solve tasks." + assert crewai_assistant.backstory == "You are a helpful assistant" + assert len(crewai_assistant.tools) == 3 + assert isinstance(crewai_assistant.llm, crewai.LLM) diff --git a/pyagentspec/tests/adapters/crewai/test_lazy_import.py b/pyagentspec/tests/adapters/crewai/test_lazy_import.py new file mode 100644 index 00000000..57a62a40 --- /dev/null +++ b/pyagentspec/tests/adapters/crewai/test_lazy_import.py @@ -0,0 +1,12 @@ +# Copyright © 2025 Oracle and/or its affiliates. +# +# This software is under the Apache License 2.0 +# (LICENSE-APACHE or http://www.apache.org/licenses/LICENSE-2.0) or Universal Permissive License +# (UPL) 1.0 (LICENSE-UPL or https://oss.oracle.com/licenses/upl), at your option. + +import pytest + + +def test_import_raises_if_crewai_not_installed(): + with pytest.raises(ImportError, match="Package crewai is not installed."): + import pyagentspec.adapters.crewai # type: ignore diff --git a/pyagentspec/tests/adapters/crewai/test_quickstart_example.py b/pyagentspec/tests/adapters/crewai/test_quickstart_example.py new file mode 100644 index 00000000..6c3ba761 --- /dev/null +++ b/pyagentspec/tests/adapters/crewai/test_quickstart_example.py @@ -0,0 +1,72 @@ +# Copyright © 2025 Oracle and/or its affiliates. +# +# This software is under the Apache License 2.0 +# (LICENSE-APACHE or http://www.apache.org/licenses/LICENSE-2.0) or Universal Permissive License +# (UPL) 1.0 (LICENSE-UPL or https://oss.oracle.com/licenses/upl), at your option. 
+ +import anyio + +from pyagentspec.agent import Agent + + +def test_quickstart_example_runs(quickstart_agent_json: Agent): + + from crewai import Crew, Task + + from pyagentspec.adapters.crewai import AgentSpecLoader + + def subtract(a: float, b: float) -> float: + return a - b + + async def main(): + loader = AgentSpecLoader(tool_registry={"subtraction-tool": subtract}) + assistant = loader.load_json(quickstart_agent_json) + + task = Task( + description="{user_input}", + expected_output="A helpful, concise reply to the user.", + agent=assistant, + async_execution=True, + ) + crew = Crew(agents=[assistant], tasks=[task]) + _ = await crew.kickoff_async(inputs={"user_input": "Compute 987654321-123456789"}) + + anyio.run(main) + + +def test_can_convert_quickstart_example_to_agentspec() -> None: + from crewai import LLM, Agent + from crewai.tools.base_tool import Tool + from pydantic import BaseModel + + from pyagentspec.adapters.crewai import AgentSpecExporter + + class InputSchema(BaseModel): + a: float + b: float + + def subtract(a: float, b: float) -> float: + """Subtract two numbers""" + return a - b + + llm = LLM( + model="hosted_vllm/Llama-4-Maverick", + api_base="http://url.to.my.llama.model/v1", + max_tokens=512, + ) + + crewai_agent = Agent( + role="Calculator agent", + goal="Computes the mathematical operation prompted by the user", + backstory="You are a calculator with 20 years of experience", + llm=llm, + tools=[ + Tool( + name="subtract", + description="Subtract two numbers", + args_schema=InputSchema, + func=subtract, + ), + ], + ) + _ = AgentSpecExporter().to_json(crewai_agent) diff --git a/pyagentspec/tests/adapters/crewai/test_tracing.py b/pyagentspec/tests/adapters/crewai/test_tracing.py new file mode 100644 index 00000000..55913243 --- /dev/null +++ b/pyagentspec/tests/adapters/crewai/test_tracing.py @@ -0,0 +1,167 @@ +# Copyright © 2025 Oracle and/or its affiliates. 
+# +# This software is under the Apache License 2.0 +# (LICENSE-APACHE or http://www.apache.org/licenses/LICENSE-2.0) or Universal Permissive License +# (UPL) 1.0 (LICENSE-UPL or https://oss.oracle.com/licenses/upl), at your option. +from pathlib import Path +from typing import List, Tuple + +from pyagentspec.tracing.events import ( + AgentExecutionEnd, + AgentExecutionStart, + Event, + LlmGenerationRequest, + LlmGenerationResponse, + ToolExecutionRequest, + ToolExecutionResponse, +) +from pyagentspec.tracing.spanprocessor import SpanProcessor +from pyagentspec.tracing.spans import AgentExecutionSpan, LlmGenerationSpan, Span, ToolExecutionSpan +from pyagentspec.tracing.trace import Trace + +from ..conftest import _replace_config_placeholders + +CONFIGS = Path(__file__).parent / "configs" + + +class DummySpanProcessor(SpanProcessor): + """ + Minimal processor mirroring the behavior used in tests/tracing/test_tracing.py + to capture span lifecycle and events for assertions. + """ + + def __init__(self, mask_sensitive_information: bool = True) -> None: + super().__init__(mask_sensitive_information=mask_sensitive_information) + self.started_up = False + self.shut_down = False + self.started_up_async = False + self.shut_down_async = False + self.starts: List[Span] = [] + self.ends: List[Span] = [] + self.events: List[Tuple[Event, Span]] = [] + self.starts_async: List[Span] = [] + self.ends_async: List[Span] = [] + self.events_async: List[Tuple[Event, Span]] = [] + + def on_start(self, span: Span) -> None: + self.starts.append(span) + + async def on_start_async(self, span: Span) -> None: + self.starts_async.append(span) + + def on_end(self, span: Span) -> None: + self.ends.append(span) + + async def on_end_async(self, span: Span) -> None: + self.ends_async.append(span) + + def on_event(self, event: Event, span: Span) -> None: + self.events.append((event, span)) + + async def on_event_async(self, event: Event, span: Span) -> None: + self.events_async.append((event, span)) 
+ + def startup(self) -> None: + self.started_up = True + + def shutdown(self) -> None: + self.shut_down = True + + async def startup_async(self) -> None: + self.started_up_async = True + + async def shutdown_async(self) -> None: + self.shut_down_async = True + + +def check_dummyspanprocessor_events_and_spans(span_processor: DummySpanProcessor) -> None: + # Assertions on spans started/ended + # We expect at least one of each span type during a normal run + started_types = [type(s) for s in span_processor.starts] + ended_types = [type(s) for s in span_processor.ends] + assert any( + issubclass(t, AgentExecutionSpan) for t in started_types + ), "AgentExecutionSpan did not start" + assert any( + issubclass(t, AgentExecutionSpan) for t in ended_types + ), "AgentExecutionSpan did not end" + + assert any( + issubclass(t, LlmGenerationSpan) for t in started_types + ), "LlmGenerationSpan did not start" + assert any( + issubclass(t, LlmGenerationSpan) for t in ended_types + ), "LlmGenerationSpan did not end" + + assert any( + issubclass(t, ToolExecutionSpan) for t in started_types + ), "ToolExecutionSpan did not start" + assert any( + issubclass(t, ToolExecutionSpan) for t in ended_types + ), "ToolExecutionSpan did not end" + + # Assertions on key events observed + event_types = [type(e) for (e, _s) in span_processor.events] + assert any( + issubclass(t, AgentExecutionStart) for t in event_types + ), "AgentExecutionStart not emitted" + assert any( + issubclass(t, AgentExecutionEnd) for t in event_types + ), "AgentExecutionEnd not emitted" + assert any( + issubclass(t, LlmGenerationRequest) for t in event_types + ), "LlmGenerationRequest not emitted" + assert any( + issubclass(t, LlmGenerationResponse) for t in event_types + ), "LlmGenerationResponse not emitted" + assert any( + issubclass(t, ToolExecutionRequest) for t in event_types + ), "ToolExecutionRequest not emitted" + assert any( + issubclass(t, ToolExecutionResponse) for t in event_types + ), "ToolExecutionResponse 
not emitted" + + +def test_crewai_crew_tracing_emits_agent_llm_and_tool_events(json_server: str) -> None: + + from pyagentspec.adapters.crewai import AgentSpecLoader + from pyagentspec.adapters.crewai._types import crewai + + # Prepare YAML config with placeholders replaced + yaml_content = (CONFIGS / "weather_agent_remote_tool.yaml").read_text() + final_yaml = _replace_config_placeholders(yaml_content, json_server) + weather_agent = AgentSpecLoader().load_yaml(final_yaml) + + # Build a simple task/crew run + task = crewai.Task( + description="Use your tool to answer this simple request from the user: {user_input}", + expected_output="A helpful, concise reply to the user.", + agent=weather_agent, + ) + crew = crewai.Crew(agents=[weather_agent], tasks=[task], verbose=False) + + proc = DummySpanProcessor() + with Trace(name="crewai_tracing_test", span_processors=[proc]): + with weather_agent.agentspec_event_listener(): + response = crew.kickoff(inputs={"user_input": "What's the weather in Agadir?"}) + assert "sunny" in str(response).lower() + + check_dummyspanprocessor_events_and_spans(proc) + + +def test_crewai_agent_tracing_emits_agent_llm_and_tool_events(json_server: str) -> None: + + from pyagentspec.adapters.crewai import AgentSpecLoader + + # Prepare YAML config with placeholders replaced + yaml_content = (CONFIGS / "weather_agent_remote_tool.yaml").read_text() + final_yaml = _replace_config_placeholders(yaml_content, json_server) + weather_agent = AgentSpecLoader().load_yaml(final_yaml) + + proc = DummySpanProcessor() + with Trace(name="crewai_tracing_test", span_processors=[proc]): + with weather_agent.agentspec_event_listener(): + response = weather_agent.kickoff(messages="What's the weather in Agadir?") + assert "sunny" in str(response).lower() + + check_dummyspanprocessor_events_and_spans(proc) diff --git a/pyagentspec/tests/conftest.py b/pyagentspec/tests/conftest.py index 4897cd8e..e713ee18 100644 --- a/pyagentspec/tests/conftest.py +++ 
b/pyagentspec/tests/conftest.py @@ -154,17 +154,31 @@ def get_directory_allowlist_write(tmp_path: str, session_tmp_path: str) -> List[ def get_directory_allowlist_read(tmp_path: str, session_tmp_path: str) -> List[Union[str, Path]]: - return get_directory_allowlist_write(tmp_path, session_tmp_path) + [ - CONFIGS_DIR, - # Docs path - Path(os.path.dirname(__file__)).parent.parent / "docs" / "pyagentspec" / "source", - # Used in docstring tests - Path(os.path.dirname(__file__)).parent / "src" / "pyagentspec", - Path("~/.pdbrc").expanduser(), - Path(os.path.dirname(__file__)).parent / ".pdbrc", - Path(os.path.dirname(__file__)) / ".pdbrc", - Path("/etc/os-release"), - ] + try: + # Crew AI sometimes attempts to read in some folders, we need to take that into account + from crewai.cli.shared.token_manager import TokenManager + + crewai_read_dirs = [ + TokenManager.get_secure_storage_path(), + "/etc/os-release", + ] + except ImportError: + crewai_read_dirs = [] + return ( + get_directory_allowlist_write(tmp_path, session_tmp_path) + + [ + CONFIGS_DIR, + # Docs path + Path(os.path.dirname(__file__)).parent.parent / "docs" / "pyagentspec" / "source", + # Used in docstring tests + Path(os.path.dirname(__file__)).parent / "src" / "pyagentspec", + Path("~/.pdbrc").expanduser(), + Path(os.path.dirname(__file__)).parent / ".pdbrc", + Path(os.path.dirname(__file__)) / ".pdbrc", + Path("/etc/os-release"), + ] + + crewai_read_dirs + ) def check_allowed_filewrite(