Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion README.md
Original file line number Diff line number Diff line change
Expand Up @@ -103,7 +103,7 @@ This repository provides a comprehensive, hands-on comparison of modern AI agent
</picture>
<strong style="vertical-align: middle; font-size: 1.2em;">Autogen</strong>
</td>
<td><code>0.7.2</code></td>
<td><code>0.7.5</code></td>
<td>
<a href="https://microsoft.github.io/autogen/stable/index.html">
<picture>
Expand Down
2 changes: 1 addition & 1 deletion autogen/.env.example
Original file line number Diff line number Diff line change
@@ -1,3 +1,3 @@
# used for settings, copy to a file named .env and change the values
OPENAI_API_KEY=your-openai-api-key
OPENAI_MODEL_NAME=gpt-4o
OPENAI_MODEL_NAME=gpt-4o-mini
47 changes: 47 additions & 0 deletions autogen/00_hello_world.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,47 @@
import asyncio

from autogen_agentchat.agents import AssistantAgent
from autogen_agentchat.ui import Console
from autogen_ext.models.openai import OpenAIChatCompletionClient

from settings import settings

"""
------------------------------------------------------------------------
In this example, we explore Autogen agents with the following features:
- Basic agent creation
- Running a simple task
- Streaming output to console

This example shows the simplest possible Autogen agent: an AssistantAgent
that responds to a single user message. We use Console() to display the
streamed output with formatting and token usage statistics.

For more details, visit:
https://microsoft.github.io/autogen/stable/user-guide/agentchat-user-guide/quickstart.html
------------------------------------------------------------------------
"""


async def main() -> None:
    """Run a minimal AssistantAgent on one task, streaming output to the console.

    The model client is closed in a ``finally`` block so the underlying HTTP
    session is released even if the agent run raises.
    """
    # --- 1. Define the model client ---
    # Credentials and model name come from the .env-backed settings module.
    model_client = OpenAIChatCompletionClient(
        model=settings.OPENAI_MODEL_NAME,
        api_key=settings.OPENAI_API_KEY.get_secret_value(),
    )

    try:
        # --- 2. Define the agent ---
        # No tools and the default system message: the simplest possible agent.
        agent = AssistantAgent(
            name="assistant",
            model_client=model_client,
        )

        # --- 3. Run the agent with a user message ---
        # Console consumes the async message stream and renders each message.
        await Console(agent.run_stream(task="Say 'Hello World!'"))
    finally:
        # --- 4. Close the model client ---
        # Guaranteed cleanup: without this, a failed run leaks the client.
        await model_client.close()


if __name__ == "__main__":
    asyncio.run(main())
76 changes: 76 additions & 0 deletions autogen/01_tools.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,76 @@
import asyncio

from autogen_agentchat.agents import AssistantAgent
from autogen_agentchat.ui import Console
from autogen_core.tools import FunctionTool
from autogen_ext.models.openai import OpenAIChatCompletionClient

from settings import settings

"""
------------------------------------------------------------------------
In this example, we explore Autogen agents with the following features:
- Tool usage with function tools
- Automatic tool schema generation
- Tool call execution and result handling

This example shows how to define Python functions as tools that agents
can invoke. Autogen automatically generates tool schemas from function
signatures and docstrings, and the AssistantAgent executes tools within
its run loop.

For more details, visit:
https://microsoft.github.io/autogen/stable/user-guide/agentchat-user-guide/tutorial/agents.html
------------------------------------------------------------------------
"""


# --- 1. Define a tool that searches the web for information ---
# For simplicity, we use a mock function here that returns a static string.
async def web_search_func(query: str) -> str:
    # NOTE: the docstring doubles as the tool description shown to the model,
    # so it is part of the tool's behavior — keep it meaningful.
    """Find information on the web"""
    # Mock result; a real implementation would query a search API with `query`.
    return "AutoGen is a programming framework for building multi-agent applications."


# NOTE: This step is automatically performed inside the AssistantAgent
# if the tool is a Python function.
# The explicit FunctionTool wrapper is built here only so we can inspect
# the auto-generated schema later in main().
web_search_function_tool = FunctionTool(
    web_search_func, description="Find information on the web"
)


async def main() -> None:
    """Run a tool-using agent, print its final answer and the tool schema.

    The model client is closed in a ``finally`` block so the underlying HTTP
    session is released even if the agent run raises.
    """
    # --- 2. Define the model client ---
    model_client = OpenAIChatCompletionClient(
        model=settings.OPENAI_MODEL_NAME,
        api_key=settings.OPENAI_API_KEY.get_secret_value(),
    )

    try:
        # --- 3. Define the agent ---
        # Passing the plain function: AssistantAgent wraps it in a FunctionTool
        # automatically, deriving the schema from the signature and docstring.
        agent = AssistantAgent(
            name="assistant",
            model_client=model_client,
            tools=[web_search_func],
            system_message="Use tools to solve tasks.",
        )

        # --- 4. Run the agent and stream the output ---
        result = await Console(
            agent.run_stream(task="Find information on AutoGen"),
            output_stats=True,
        )

        # --- 5. Print the final answer ---
        print("-" * 50)
        print("Final Answer:", result.messages[-1].content)

        # --- 6. Print the tool schema ---
        # Shows the JSON schema AutoGen generated from the function signature.
        print("-" * 50)
        print("Tool Schema:", web_search_function_tool.schema)
    finally:
        # --- 7. Close the model client ---
        # Guaranteed cleanup: without this, a failed run leaks the client.
        await model_client.close()


if __name__ == "__main__":
    asyncio.run(main())
63 changes: 63 additions & 0 deletions autogen/02_streaming_and_metrics.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,63 @@
import asyncio

from autogen_agentchat.agents import AssistantAgent
from autogen_agentchat.ui import Console
from autogen_ext.models.openai import OpenAIChatCompletionClient

from settings import settings

"""
------------------------------------------------------------------------
In this example, we explore Autogen agents with the following features:
- Custom tool definition
- Streaming responses via Console
- Token usage statistics and metrics

This example shows the different ways to stream responses and collect
metrics using Autogen. The Console utility formats streamed messages and
optionally prints token usage stats at the end.

For more details, visit:
https://microsoft.github.io/autogen/stable/user-guide/agentchat-user-guide/tutorial/agents.html
------------------------------------------------------------------------
"""


# --- 1. Define a tool that calculates the sum of a list of integers ---
def add_numbers(values: list[int]) -> int:
    # NOTE: the docstring doubles as the tool description shown to the model.
    """Calculate the sum of a list of integers."""
    total = 0
    for value in values:
        total += value
    return total


async def main() -> None:
    """Stream a tool-using agent's response and print token usage stats.

    The model client is closed in a ``finally`` block so the underlying HTTP
    session is released even if the agent run raises.
    """
    # --- 2. Define the model client ---
    model_client = OpenAIChatCompletionClient(
        model=settings.OPENAI_MODEL_NAME,
        api_key=settings.OPENAI_API_KEY.get_secret_value(),
    )

    try:
        # --- 3. Define the agent ---
        agent = AssistantAgent(
            name="assistant",
            model_client=model_client,
            tools=[add_numbers],
            system_message="Use tools to solve tasks.",
        )

        # --- 4. Stream the agent response with metrics ---
        # Option 1: read each message from the stream individually.
        # async for message in agent.run_stream(task="What is the result of 2 + 4?"):
        #     print(message)

        # Option 2: use Console to print all messages as they appear.
        await Console(
            agent.run_stream(task="What is the result of 2 + 4?"),
            output_stats=True,  # Enable stats/metrics printing
        )
    finally:
        # --- 5. Close the model client ---
        # Guaranteed cleanup: without this, a failed run leaks the client.
        await model_client.close()


if __name__ == "__main__":
    asyncio.run(main())
65 changes: 65 additions & 0 deletions autogen/03_structured_outputs.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,65 @@
import asyncio
from typing import Literal

from pydantic import BaseModel
from autogen_agentchat.agents import AssistantAgent
from autogen_agentchat.messages import StructuredMessage
from autogen_agentchat.ui import Console
from autogen_ext.models.openai import OpenAIChatCompletionClient

from settings import settings

"""
------------------------------------------------------------------------
In this example, we explore Autogen agents with the following features:
- Structured outputs with Pydantic models
- Chain-of-thought reasoning via thoughts field
- Type-validated agent responses

This example shows how to define structured outputs for models.
Structured output allows models to return structured JSON text with a
pre-defined schema provided as a Pydantic BaseModel class, which can
also be used to validate the output programmatically.

For more details, visit:
https://microsoft.github.io/autogen/stable/user-guide/agentchat-user-guide/tutorial/agents.html
------------------------------------------------------------------------
"""


# --- 1. Define the Agent response model ---
# Pydantic schema passed to the agent as output_content_type; the model is
# constrained to return JSON matching these fields.
class AgentResponse(BaseModel):
    # Free-form chain-of-thought explanation preceding the classification.
    thoughts: str
    # Final sentiment label, restricted to exactly one of three values.
    response: Literal["happy", "sad", "neutral"]


async def main() -> None:
    """Run an agent with structured (Pydantic-validated) output and print it.

    Improvements over the straight-line version:
    - The model client is closed in a ``finally`` block so the HTTP session is
      released even if the run raises.
    - Output validation uses explicit checks instead of ``assert``, which is
      stripped when Python runs with ``-O``.
    """
    # --- 2. Define the model client ---
    model_client = OpenAIChatCompletionClient(
        model=settings.OPENAI_MODEL_NAME,
        api_key=settings.OPENAI_API_KEY.get_secret_value(),
    )

    try:
        # --- 3. Define the agent ---
        # output_content_type makes the agent emit StructuredMessage payloads
        # validated against AgentResponse.
        agent = AssistantAgent(
            "assistant",
            model_client=model_client,
            system_message="Categorize the input as happy, sad, or neutral following the JSON format.",
            output_content_type=AgentResponse,
        )

        # --- 4. Run the agent and print the result ---
        result = await Console(agent.run_stream(task="I am happy."))

        # --- 5. Validate and print structured output ---
        final = result.messages[-1]
        if not isinstance(final, StructuredMessage) or not isinstance(
            final.content, AgentResponse
        ):
            raise TypeError(
                f"Expected a StructuredMessage[AgentResponse], got {type(final).__name__}"
            )
        print("Thought:", final.content.thoughts)
        print("Response:", final.content.response)
    finally:
        # --- 6. Close the model client ---
        # Guaranteed cleanup: without this, a failed run leaks the client.
        await model_client.close()


if __name__ == "__main__":
    asyncio.run(main())
77 changes: 77 additions & 0 deletions autogen/04_human_in_the_loop.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,77 @@
import asyncio

from autogen_agentchat.agents import AssistantAgent
from autogen_agentchat.base import Handoff
from autogen_agentchat.conditions import HandoffTermination, TextMentionTermination
from autogen_agentchat.teams import RoundRobinGroupChat
from autogen_agentchat.ui import Console
from autogen_ext.models.openai import OpenAIChatCompletionClient

from settings import settings

"""
------------------------------------------------------------------------
In this example, we explore Autogen agents with the following features:
- Human-in-the-loop via HandoffTermination
- Agent handoff to user for input
- Resuming a team run with user feedback

This example shows how to integrate human feedback into agent workflows
using the handoff pattern. When the agent cannot complete a task alone,
it hands off to the user. The team pauses, and the application provides
the needed information in the next run() call to resume execution.

For more details, visit:
https://microsoft.github.io/autogen/stable/user-guide/agentchat-user-guide/tutorial/human-in-the-loop.html
------------------------------------------------------------------------
"""


async def main() -> None:
    """Demonstrate human-in-the-loop via handoff with a single-agent team.

    First run: the agent hands off to the user and the team pauses.
    Second run: the user's answer is fed back in to resume and finish.
    The model client is closed in a ``finally`` block so the underlying HTTP
    session is released even if either run raises.
    """
    # --- 1. Define the model client ---
    model_client = OpenAIChatCompletionClient(
        model=settings.OPENAI_MODEL_NAME,
        api_key=settings.OPENAI_API_KEY.get_secret_value(),
    )

    try:
        # --- 2. Create an agent that hands off to the user when it cannot proceed ---
        lazy_agent = AssistantAgent(
            "lazy_assistant",
            model_client=model_client,
            handoffs=[Handoff(target="user", message="Transfer to user.")],
            system_message=(
                "If you cannot complete the task, transfer to user. "
                "Otherwise, when finished, respond with 'TERMINATE'."
            ),
        )

        # --- 3. Define termination conditions ---
        # Stop either when the agent hands off to the user or says TERMINATE.
        handoff_termination = HandoffTermination(target="user")
        text_termination = TextMentionTermination("TERMINATE")

        # --- 4. Create a single-agent team ---
        team = RoundRobinGroupChat(
            [lazy_agent],
            termination_condition=handoff_termination | text_termination,
        )

        # --- 5. First run: the agent hands off to the user ---
        print("=" * 50)
        print("FIRST RUN: Agent will hand off to user")
        print("=" * 50)
        await Console(team.run_stream(task="What is the weather in New York?"))

        # --- 6. Second run: provide the information the agent needs ---
        # The team retains state, so this resumes the paused conversation.
        print("\n" + "=" * 50)
        print("SECOND RUN: User provides the answer")
        print("=" * 50)
        await Console(
            team.run_stream(task="The weather in New York is sunny and 72 degrees.")
        )
    finally:
        # --- 7. Close the model client ---
        # Guaranteed cleanup: without this, a failed run leaks the client.
        await model_client.close()


if __name__ == "__main__":
    asyncio.run(main())
Loading