Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion README.md
Original file line number Diff line number Diff line change
Expand Up @@ -46,7 +46,7 @@ This repository provides a comprehensive, hands-on comparison of modern AI agent
<img src="res/ag2.svg" alt="AG2" width="52" style="vertical-align: middle;">
</picture>
</td>
<td><code>0.9.1</code></td>
<td><code>0.11.4</code></td>
<td>
<a href="https://docs.ag2.ai/latest/">
<picture>
Expand Down
48 changes: 48 additions & 0 deletions ag2/00_simple_agent.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,48 @@
import os

from autogen import ConversableAgent, LLMConfig

from settings import settings

os.environ["OPENAI_API_KEY"] = settings.OPENAI_API_KEY.get_secret_value()

"""
-------------------------------------------------------
In this example, we explore AG2 with the following features:
- Creating a simple ConversableAgent
- Using LLMConfig with the new dict-based configuration
- Running a single-turn conversation with run() and process()

AG2 (formerly AutoGen) is a multi-agent framework where
ConversableAgent is the core building block. This example
shows the simplest possible agent interaction.

For more details, visit:
https://docs.ag2.ai/latest/docs/user-guide/basic-concepts/conversable-agent/
-------------------------------------------------------
"""

# --- 1. Configure the LLM ---
# Dict-based configuration is the modern LLMConfig entry point.
model_config = LLMConfig({"model": settings.OPENAI_MODEL_NAME})

# --- 2. Create a simple agent ---
# human_input_mode="NEVER" keeps the whole run non-interactive.
helper = ConversableAgent(
    name="assistant",
    system_message="You are a helpful assistant. Be concise, reply in 1-2 sentences.",
    llm_config=model_config,
    human_input_mode="NEVER",
)

# --- 3. Run the agent and print the conversation ---
run_result = helper.run(
    message="Where does the phrase 'hello world' come from?",
    max_turns=1,
    user_input=False,
)

# process() drains the event stream and echoes the conversation to stdout.
run_result.process()

# --- 4. Print the summary ---
print("\n=== Summary ===")
print(run_result.summary)
73 changes: 73 additions & 0 deletions ag2/01_agent_with_tools.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,73 @@
import os

from autogen import ConversableAgent, LLMConfig

from settings import settings

os.environ["OPENAI_API_KEY"] = settings.OPENAI_API_KEY.get_secret_value()

"""
-------------------------------------------------------
In this example, we explore AG2 with the following features:
- Defining custom tool functions for agents
- Using the functions= parameter on ConversableAgent
- Automatic tool execution with run() and process()

Tools let agents call Python functions to retrieve data
or perform actions. AG2 handles the tool calling loop
automatically — the agent decides which tool to call,
and AG2 executes it and feeds results back.

For more details, visit:
https://docs.ag2.ai/latest/docs/user-guide/advanced-concepts/tools/basics/
-------------------------------------------------------
"""


# --- 1. Define custom tools ---
def get_weather(city: str) -> str:
    """Return a canned current-weather report for *city*.

    Lookup is case-insensitive; unknown cities yield a fallback message.
    """
    key = city.lower()
    reports = {
        "lisbon": "Sunny, 25°C",
        "london": "Cloudy, 15°C",
        "tokyo": "Rainy, 18°C",
    }
    if key in reports:
        return reports[key]
    return f"No weather data for {city}"


def get_population(city: str) -> str:
    """Return an approximate population figure for *city*.

    Lookup is case-insensitive; unknown cities yield a fallback message.
    """
    figures = {
        "lisbon": "~550,000",
        "london": "~9,000,000",
        "tokyo": "~14,000,000",
    }
    key = city.lower()
    if key not in figures:
        return f"No population data for {city}"
    return figures[key]


# --- 2. Create an agent with tools ---
model_config = LLMConfig({"model": settings.OPENAI_MODEL_NAME})

# Passing plain Python functions via `functions=` registers them with AG2
# for both LLM tool-calling and automatic execution during run().
city_agent = ConversableAgent(
    name="assistant",
    system_message=(
        "You are a helpful city information assistant. "
        "Use tools to look up data, then give a brief summary. "
        "Reply in 2-3 sentences max."
    ),
    llm_config=model_config,
    functions=[get_weather, get_population],
    human_input_mode="NEVER",
)

# --- 3. Run the agent with a query that triggers tools ---
# max_turns=2 leaves room for the tool-call round-trip plus the final answer.
run_result = city_agent.run(
    message="What's the weather and population of Lisbon?",
    max_turns=2,
    user_input=False,
)
run_result.process()

# --- 4. Print the summary ---
print("\n=== Summary ===")
print(run_result.summary)
71 changes: 71 additions & 0 deletions ag2/02_structured_outputs.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,71 @@
import os

from pydantic import BaseModel

from autogen import ConversableAgent, LLMConfig

from settings import settings

os.environ["OPENAI_API_KEY"] = settings.OPENAI_API_KEY.get_secret_value()

"""
-------------------------------------------------------
In this example, we explore AG2 with the following features:
- Structured outputs using Pydantic models
- response_format parameter in LLMConfig
- Guaranteed schema-conforming JSON responses

Structured outputs force the LLM to return data matching
a Pydantic model schema, making agent outputs reliable
for downstream processing without manual parsing.

For more details, visit:
https://docs.ag2.ai/latest/docs/user-guide/basic-concepts/structured-output/
-------------------------------------------------------
"""


# --- 1. Define the Pydantic output model ---
class CityInfo(BaseModel):
    """Schema the LLM reply must conform to (wired in via ``response_format``)."""

    name: str  # city name, e.g. the city asked about in the prompt
    country: str  # country the city belongs to
    population: str  # free-form text, not a number — presumably like "~14,000,000"
    famous_for: str  # short description of what the city is known for
    best_time_to_visit: str  # free-form, e.g. a season or month range


# --- 2. Create LLM config with response_format ---
# response_format=CityInfo makes the model emit JSON matching the schema.
structured_config = LLMConfig(
    {"model": settings.OPENAI_MODEL_NAME},
    response_format=CityInfo,
)

# --- 3. Create the agent ---
expert = ConversableAgent(
    name="city_expert",
    system_message=(
        "You are a city information expert. "
        "When asked about a city, provide accurate structured information."
    ),
    llm_config=structured_config,
    human_input_mode="NEVER",
)

# --- 4. Run the agent ---
run_result = expert.run(
    message="Tell me about Tokyo.",
    max_turns=1,
    user_input=False,
)
run_result.process()

# --- 5. Parse and display structured output ---
# The final message body is guaranteed-schema JSON; validate it back into
# a typed CityInfo instance.
raw_json = run_result.messages[-1]["content"]
info = CityInfo.model_validate_json(raw_json)

print("\n=== Parsed Structured Output ===")
print(f"  City: {info.name}")
print(f"  Country: {info.country}")
print(f"  Population: {info.population}")
print(f"  Famous for: {info.famous_for}")
print(f"  Best time to visit: {info.best_time_to_visit}")
81 changes: 81 additions & 0 deletions ag2/03_human_in_the_loop.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,81 @@
import os

from autogen import ConversableAgent, LLMConfig

from settings import settings

os.environ["OPENAI_API_KEY"] = settings.OPENAI_API_KEY.get_secret_value()

"""
-------------------------------------------------------
In this example, we explore AG2 with the following features:
- Human-in-the-loop via max_consecutive_auto_reply
- Simulated human approval using a custom reply function
- Controlling conversation flow with termination conditions

Real human-in-the-loop uses human_input_mode="ALWAYS" which
requires interactive input. This example simulates human
oversight by using a reply function that auto-approves after
reviewing the agent's plan, demonstrating the pattern without
blocking execution.

For more details, visit:
https://docs.ag2.ai/latest/docs/user-guide/basic-concepts/human-in-the-loop/
-------------------------------------------------------
"""

# --- 1. Configure LLM ---
planner_llm = LLMConfig({"model": settings.OPENAI_MODEL_NAME})

# --- 2. Create the assistant agent ---
# The system prompt instructs the agent to emit TERMINATE once the human
# approves, which the human proxy below uses as its termination condition.
assistant = ConversableAgent(
    name="assistant",
    system_message=(
        "You are a travel planner. When given a destination, "
        "propose a brief 3-day itinerary (3-5 bullet points). "
        "After the human approves, say TERMINATE."
    ),
    llm_config=planner_llm,
    human_input_mode="NEVER",
)

# --- 3. Create a human proxy that auto-approves ---
# A real deployment would use human_input_mode="ALWAYS" for live input;
# here a scripted reviewer stands in so the example never blocks.
# Module-level counter read by the final summary print and bumped by the
# simulated reply function.
approval_count = 0


def simulated_human_reply(
    recipient: ConversableAgent,
    messages: list[dict] | None = None,
    sender: ConversableAgent | None = None,
    config: dict | None = None,
) -> tuple[bool, str]:
    """Stand-in for a live human: approve the first plan, then just say thanks.

    Returns ``(final, reply)`` per AG2's reply-function contract; ``True``
    means this reply is final for the turn.
    """
    global approval_count
    approval_count += 1
    first_review = approval_count == 1
    if not first_review:
        return True, "Thank you!"
    print("\n[Simulated Human] Reviewing the plan...")
    return True, "Looks good! I approve this itinerary. Please finalize it."


def _chat_is_over(msg: dict) -> bool:
    # End the conversation once any message carries the TERMINATE marker.
    return "TERMINATE" in (msg.get("content", "") or "")


# llm_config=False: this proxy never calls a model — its replies come from
# the registered simulated_human_reply function.
human = ConversableAgent(
    name="human",
    llm_config=False,
    human_input_mode="NEVER",
    max_consecutive_auto_reply=2,
    is_termination_msg=_chat_is_over,
)
human.register_reply([ConversableAgent, None], simulated_human_reply)

# --- 4. Run the conversation ---
print("=== Human-in-the-Loop: Travel Planning ===\n")
human.initiate_chat(
    assistant,
    message="Plan a 3-day trip to Barcelona.",
    max_turns=3,
)

print("\n=== Conversation Complete ===")
print(f"Human reviewed and approved after {approval_count} interaction(s).")
88 changes: 88 additions & 0 deletions ag2/04_multi_agent.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,88 @@
import os

from autogen import ConversableAgent, LLMConfig
from autogen.agentchat import initiate_group_chat
from autogen.agentchat.group.patterns import AutoPattern

from settings import settings

os.environ["OPENAI_API_KEY"] = settings.OPENAI_API_KEY.get_secret_value()

"""
-------------------------------------------------------
In this example, we explore AG2 with the following features:
- Multi-agent group chat with pattern-based orchestration
- AutoPattern for automatic speaker selection
- initiate_group_chat() as the modern group chat API

AG2's group chat allows multiple specialized agents to
collaborate on a task. AutoPattern uses LLM-based speaker
selection to route the conversation to the most appropriate
agent at each turn.

For more details, visit:
https://docs.ag2.ai/latest/docs/user-guide/advanced-concepts/orchestration/group-chat/auto-pattern/
-------------------------------------------------------
"""

# --- 1. Configure LLM ---
llm_config = LLMConfig({"model": settings.OPENAI_MODEL_NAME})


# --- 2. Create specialized agents ---
def _specialist(agent_name: str, instructions: str) -> ConversableAgent:
    """Build a non-interactive agent sharing the module-level LLM config."""
    return ConversableAgent(
        name=agent_name,
        system_message=instructions,
        llm_config=llm_config,
        human_input_mode="NEVER",
    )


researcher = _specialist(
    "researcher",
    "You are a research specialist. Find and present key facts "
    "about the topic. Be concise — 3-4 bullet points max.",
)

writer = _specialist(
    "writer",
    "You are a content writer. Take research findings and write "
    "a brief, engaging summary paragraph (3-4 sentences). "
    "When the summary is complete, end with TERMINATE.",
)

critic = _specialist(
    "critic",
    "You are a quality reviewer. Check the writer's summary for "
    "accuracy and clarity. Give brief feedback in 1-2 sentences, "
    "or say 'Approved' if it's good.",
)


# --- 3. Create a user proxy for initiating the chat ---
def _should_stop(msg: dict) -> bool:
    # Stop once any message contains the TERMINATE marker from the writer.
    return "TERMINATE" in (msg.get("content", "") or "")


# llm_config=False: the user proxy never calls a model itself.
user = ConversableAgent(
    name="user",
    human_input_mode="NEVER",
    llm_config=False,
    is_termination_msg=_should_stop,
)

# --- 4. Set up AutoPattern orchestration ---
# AutoPattern delegates speaker selection to an LLM-backed group manager,
# so the manager needs its own llm_config.
orchestration = AutoPattern(
    initial_agent=researcher,
    agents=[researcher, writer, critic],
    user_agent=user,
    group_manager_args={"llm_config": llm_config},
)

# --- 5. Run the group chat ---
print("=== Multi-Agent Group Chat ===\n")
task = (
    "Research and write a brief summary about the history of the "
    "Python programming language."
)
chat_result, shared_context, final_speaker = initiate_group_chat(
    pattern=orchestration,
    messages=task,
    max_rounds=6,
)

print(f"\n=== Group Chat Complete (last speaker: {final_speaker.name}) ===")
Loading