-
Notifications
You must be signed in to change notification settings - Fork 0
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
- Loading branch information
0 parents
commit a3ef242
Showing
10 changed files
with
1,284 additions
and
0 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,9 @@ | ||
venv | ||
.env | ||
|
||
.ruff | ||
|
||
__pycache__ | ||
*.pyc | ||
|
||
responses |
Empty file.
Empty file.
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,69 @@ | ||
from typing import List | ||
|
||
|
||
class Debater:
    """One participant in an LLM-vs-LLM debate.

    Wraps an LLM client and tracks both this debater's own responses and the
    running transcript so later prompts carry the full debate context.
    """

    def __init__(
        self,
        llm_client,
        model: str,
        topic: str,
        position: str,
    ) -> None:
        """
        Args:
            llm_client: Object exposing ``get_response(prompt, model) -> str``.
            model: Model identifier forwarded to the client on every call.
            topic: The debate topic, quoted verbatim in prompts.
            position: "for" or "against"; interpolated verbatim in prompts.
        """
        self.llm_client = llm_client
        self.model: str = model
        self.topic: str = topic
        self.position: str = position
        # Only this debater's own generated statements, in order.
        self.responses: List[str] = []
        # Full transcript entries (both sides) used to build later prompts.
        self.debate_history: List[str] = []

    def _ask(self, prompt: str, label: str) -> str:
        """Send *prompt* to the LLM, record the reply under *label*, return it.

        Shared bookkeeping previously duplicated across start/respond_to/
        conclude: append to ``responses`` and to ``debate_history``.
        """
        response: str = self.llm_client.get_response(prompt, self.model)
        self.responses.append(response)
        self.debate_history.append(
            f"{self.position.capitalize()} {label}: {response}"
        )
        return response

    def start(self) -> str:
        """Generate and record this debater's opening argument."""
        initial_prompt: str = (
            f"You are participating in a debate on the topic: '{self.topic}'. "
            f"You are {self.position} the proposition. Make a convincing opening "
            "argument for your position."
        )
        return self._ask(initial_prompt, "opening argument")

    def respond_to(self, opponent_argument: str) -> str:
        """Record the opponent's argument, then produce and record a rebuttal."""
        self.debate_history.append(f"Opponent's argument: {opponent_argument}")

        prompt: str = (
            f"You are participating in a debate on the topic: '{self.topic}'. "
            f"You are {self.position} the proposition. Here's the debate history "
            "so far:\n\n"
        )
        prompt += "\n\n".join(self.debate_history)
        prompt += (
            f"\n\nNow, respond to the opponent's latest argument, maintaining your "
            f"position {self.position} the proposition."
        )
        return self._ask(prompt, "response")

    def conclude(self) -> str:
        """Produce and record a closing statement summarizing this side's case."""
        prompt: str = (
            f"You have been participating in a debate on the topic: '{self.topic}'. "
            f"You are {self.position} the proposition. Here's the entire debate "
            "history:\n\n"
        )
        prompt += "\n\n".join(self.debate_history)
        prompt += (
            "\n\nNow, provide a concluding statement for the debate, summarizing your "
            "position and the key points you've made."
        )
        return self._ask(prompt, "conclusion")
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,49 @@ | ||
import os | ||
from abc import ABC, abstractmethod | ||
from typing import Optional | ||
|
||
from anthropic import Anthropic | ||
from openai import OpenAI | ||
|
||
|
||
class LLMClient(ABC):
    """Abstract interface for chat-style LLM backends."""

    @abstractmethod
    def get_response(self, prompt: str, model: str) -> str:
        """Return the model's reply to *prompt* sent as one user message."""
        ...
|
||
class OpenAIClient(LLMClient):
    """``LLMClient`` backed by the OpenAI chat-completions API."""

    def __init__(self) -> None:
        # Fail fast at construction if the key is missing, rather than on
        # the first request.
        key = os.getenv("OPENAI_API_KEY")
        if not key:
            raise ValueError("OPENAI_API_KEY environment variable not set")
        self.api_key: Optional[str] = key
        self.client: OpenAI = OpenAI(api_key=self.api_key)

    def get_response(self, prompt: str, model: str) -> str:
        """Send *prompt* as a single user message and return the reply text."""
        messages = [{"role": "user", "content": prompt}]
        completion = self.client.chat.completions.create(
            model=model,
            messages=messages,
        )
        return completion.choices[0].message.content
|
||
class AnthropicClient(LLMClient):
    """``LLMClient`` backed by the Anthropic Messages API."""

    def __init__(self) -> None:
        # Fail fast at construction if the key is missing.
        key = os.getenv("ANTHROPIC_API_KEY")
        if not key:
            raise ValueError("ANTHROPIC_API_KEY environment variable not set")
        self.api_key: Optional[str] = key
        self.client: Anthropic = Anthropic(api_key=self.api_key)

    def get_response(self, prompt: str, model: str) -> str:
        """Send *prompt* as one user message; replies are capped at 1000 tokens."""
        reply = self.client.messages.create(
            model=model,
            max_tokens=1000,
            messages=[{"role": "user", "content": prompt}],
        )
        return reply.content[0].text
|
||
def get_llm_client(llm_type: str) -> LLMClient:
    """Instantiate the client for *llm_type* ("openai" or "anthropic").

    Raises:
        ValueError: If *llm_type* names no supported backend.
    """
    factories = {"openai": OpenAIClient, "anthropic": AnthropicClient}
    factory = factories.get(llm_type)
    if factory is None:
        raise ValueError(f"Unsupported LLM type: {llm_type}")
    return factory()
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,120 @@ | ||
import argparse | ||
import logging | ||
import os | ||
import time | ||
from datetime import datetime | ||
from typing import Any, Dict, List | ||
|
||
from debater import Debater | ||
from llm_clients import get_llm_client | ||
from utils import generate_filename, save_html, save_json | ||
|
||
logger = logging.getLogger(__name__) | ||
|
||
LLM_CHOICES: List[str] = ["openai", "anthropic"] | ||
LOG_LEVELS: Dict[str, int] = { | ||
"DEBUG": logging.DEBUG, | ||
"INFO": logging.INFO, | ||
"WARNING": logging.WARNING, | ||
"ERROR": logging.ERROR, | ||
"CRITICAL": logging.CRITICAL, | ||
} | ||
|
||
def main(model: str, topic: str, llm_type: str, num_rounds: int) -> Dict[str, Any]:
    """Run a complete debate and return its transcript plus metadata.

    Args:
        model: Model name forwarded to the LLM client.
        topic: Debate topic.
        llm_type: Backend selector understood by ``get_llm_client``.
        num_rounds: Number of rebuttal rounds after the opening arguments.

    Returns:
        Dict with "metadata" (run parameters, ISO date, time_taken seconds)
        and "debate" (opening_arguments, round_N entries, conclusions).
    """
    start_time = time.time()
    llm_client = get_llm_client(llm_type)
    for_debater: Debater = Debater(llm_client, model, topic, "for")
    against_debater: Debater = Debater(llm_client, model, topic, "against")

    debate_results: Dict[str, Any] = {
        "metadata": {
            "model": model,
            "topic": topic,
            "llm_type": llm_type,
            "num_rounds": num_rounds,
            "date": datetime.now().isoformat(),
        },
        "debate": {},
    }

    logger.info("Starting debate...")
    debate_results["debate"]["opening_arguments"] = {
        "for": for_debater.start(),
        "against": against_debater.start(),
    }

    # Lazy %-style args so debug strings are only built when DEBUG is enabled.
    logger.debug("\nInitial arguments:")
    logger.debug("For: %s", debate_results["debate"]["opening_arguments"]["for"])
    logger.debug(
        "\nAgainst: %s", debate_results["debate"]["opening_arguments"]["against"]
    )

    # Renamed from `round` to avoid shadowing the builtin.
    for round_num in range(1, num_rounds + 1):
        logger.debug("\nRound %d:", round_num)
        round_key = f"round_{round_num}"
        # Order matters: "for" rebuts the opponent's last statement, then
        # "against" rebuts the rebuttal that was just generated.
        debate_results["debate"][round_key] = {
            "for": for_debater.respond_to(against_debater.responses[-1]),
            "against": against_debater.respond_to(for_debater.responses[-1]),
        }

        logger.debug("For: %s", debate_results["debate"][round_key]["for"])
        logger.debug("\nAgainst: %s", debate_results["debate"][round_key]["against"])

    logger.debug("\nConcluding statements:")
    debate_results["debate"]["conclusions"] = {
        "for": for_debater.conclude(),
        "against": against_debater.conclude(),
    }

    logger.debug("For: %s", debate_results["debate"]["conclusions"]["for"])
    logger.debug("\nAgainst: %s", debate_results["debate"]["conclusions"]["against"])

    end_time = time.time()
    debate_results["metadata"]["time_taken"] = end_time - start_time

    return debate_results
|
||
if __name__ == "__main__":
    parser: argparse.ArgumentParser = argparse.ArgumentParser(
        description="Run a debate between two LLMs."
    )
    parser.add_argument("llm_type", choices=LLM_CHOICES, help="Type of LLM to use")
    parser.add_argument("model", help="Model name for the chosen LLM")
    parser.add_argument("topic", help="Topic of the debate")
    parser.add_argument(
        "--rounds",
        type=int,
        default=3,
        help="Number of debate rounds (default: 3)",
    )
    parser.add_argument(
        "--log-level",
        choices=LOG_LEVELS.keys(),
        default="INFO",
        help="Set the logging level",
    )
    parser.add_argument(
        "--filename",
        help="Custom output filename (without extension)",
        default=None,
    )
    args: argparse.Namespace = parser.parse_args()

    logging.basicConfig(
        level=LOG_LEVELS[args.log_level],
        format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
    )

    script_path = os.path.abspath(__file__)
    logger.info("Running script: %s", script_path)

    results = main(args.model, args.topic, args.llm_type, args.rounds)

    # BUG FIX: previously `filename` was assigned only when --filename was
    # absent, so passing --filename raised NameError at save_json below.
    # Use the custom name when given, otherwise derive one from the topic.
    filename = args.filename or generate_filename(args.topic)

    json_path = save_json(results, filename)
    html_path = save_html(results, filename)

    logger.info("Debate results saved to JSON: %s", json_path)
    logger.info("Debate results saved to HTML: %s", html_path)
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,62 @@ | ||
<!DOCTYPE html>
<!-- Debate-results template. The {placeholder} tokens ({topic}, {llm_type},
     {model}, {date}, {time_taken}, {debate_content}) are substituted by
     generate_html() via plain str.replace — NOT str.format — because the
     CSS below contains literal braces. Do not rename them casually. -->
<html lang="en">
<head>
    <meta charset="UTF-8">
    <meta name="viewport" content="width=device-width, initial-scale=1.0">
    <title>LLM Debate Results: {topic}</title>
    <style>
        body {
            font-family: Arial, sans-serif;
            line-height: 1.6;
            color: #333;
            max-width: 800px;
            margin: 0 auto;
            padding: 20px;
            background-color: #f4f4f4;
        }
        h1, h2, h3 {
            color: #2c3e50;
        }
        /* Card holding the run parameters (topic, model, date, timing). */
        .metadata {
            background-color: #fff;
            padding: 15px;
            border-radius: 5px;
            margin-bottom: 20px;
            box-shadow: 0 2px 5px rgba(0,0,0,0.1);
        }
        /* One card per debate section (opening, round_N, conclusions). */
        .round {
            background-color: #fff;
            margin-bottom: 30px;
            border-radius: 5px;
            padding: 20px;
            box-shadow: 0 2px 5px rgba(0,0,0,0.1);
        }
        .argument {
            margin-bottom: 20px;
            padding: 15px;
            border-radius: 5px;
        }
        /* Side-specific tints: blue for "for", red for "against". */
        .for {
            background-color: #e6f3ff;
        }
        .against {
            background-color: #fff0f0;
        }
        .argument h3 {
            margin-top: 0;
        }
    </style>
</head>
<body>
    <h1>LLM Debate Results: {topic}</h1>
    <div class="metadata">
        <h2>Debate Metadata</h2>
        <p><strong>Topic:</strong> {topic}</p>
        <p><strong>LLM Type:</strong> {llm_type}</p>
        <p><strong>Model:</strong> {model}</p>
        <p><strong>Date:</strong> {date}</p>
        <p><strong>Time Taken:</strong> {time_taken} seconds</p>
    </div>
    <!-- generate_html() injects the per-round .round/.argument divs here. -->
    {debate_content}
</body>
</html>
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,68 @@ | ||
import json | ||
import logging | ||
import os | ||
from datetime import datetime | ||
from typing import Any, Dict | ||
|
||
logger = logging.getLogger(__name__) | ||
|
||
|
||
def generate_filename(topic: str) -> str:
    """Build an output file stem from *topic* plus the current timestamp.

    The topic is lower-cased with spaces turned into underscores, and a
    ``YYYYmmdd_HHMMSS`` timestamp is appended to keep names unique per run.
    """
    stem = "_".join(topic.lower().split(" "))
    stamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    return f"{stem}_{stamp}"
|
||
|
||
def save_json(data: Dict[str, Any], filename: str) -> str:
    """Save the debate results as ``responses/<filename>.json``.

    BUG FIX: the *filename* argument was previously ignored — the path was a
    broken placeholder f-string with no interpolation, so every run wrote to
    the same file, clobbering earlier results.

    Returns:
        The path of the file that was written.
    """
    # "responses" matches the git-ignored output directory; create it on
    # demand so a fresh checkout works.
    os.makedirs("responses", exist_ok=True)
    file_path = os.path.join("responses", f"{filename}.json")
    with open(file_path, "w") as f:
        json.dump(data, f, indent=2)
    return file_path
|
||
|
||
def save_html(data: Dict[str, Any], filename: str) -> str:
    """Save the debate results as ``responses/<filename>.html``.

    BUG FIX: the *filename* argument was previously ignored — the path was a
    broken placeholder f-string with no interpolation, so every run wrote to
    the same file, clobbering earlier results.

    Returns:
        The path of the file that was written.
    """
    html_content = generate_html(data)
    # "responses" matches the git-ignored output directory; create it on
    # demand so a fresh checkout works.
    os.makedirs("responses", exist_ok=True)
    file_path = os.path.join("responses", f"{filename}.html")
    with open(file_path, "w") as f:
        f.write(html_content)
    return file_path
|
||
|
||
def generate_html(data: Dict[str, Any]) -> str:
    """Generate HTML content for the debate results.

    Placeholder substitution uses plain ``str.replace`` (not ``str.format``)
    because the template's inline CSS contains literal braces.
    """
    here = os.path.dirname(os.path.abspath(__file__))
    template_path = os.path.join(here, 'template.html')

    try:
        with open(template_path) as file:
            html_template = file.read()
    except FileNotFoundError:
        logger.exception("HTML template file not found")
        return "HTML template file not found"

    # One <div class="round"> per transcript section (opening/round_N/conclusions).
    sections = []
    for round_name, round_data in data['debate'].items():
        round_title = round_name.replace("_", " ").title()
        pieces = [f'<div class="round"><h2>{round_title}</h2>']
        for side, argument in round_data.items():
            pieces.append(f'''
            <div class="argument {side}">
                <h3>{side.capitalize()}</h3>
                <p>{argument}</p>
            </div>''')
        pieces.append('</div>')
        sections.append(''.join(pieces))
    debate_content = ''.join(sections)

    metadata = data['metadata']
    replacements = {
        '{topic}': metadata['topic'],
        '{llm_type}': metadata['llm_type'],
        '{model}': metadata['model'],
        '{date}': metadata['date'],
        '{time_taken}': f"{metadata['time_taken']:.2f}",
        '{debate_content}': debate_content,
    }
    html_content = html_template
    for placeholder, value in replacements.items():
        html_content = html_content.replace(placeholder, value)

    return html_content
Oops, something went wrong.