Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
12 changes: 9 additions & 3 deletions backend/config.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,12 +8,18 @@
# OpenRouter API key
OPENROUTER_API_KEY = os.getenv("OPENROUTER_API_KEY")

# Tavily API key for web search
TAVILY_API_KEY = os.getenv("TAVILY_API_KEY")

# Enable/disable web search tool
ENABLE_WEB_SEARCH = os.getenv("ENABLE_WEB_SEARCH", "true").lower() == "true"

# Council members - list of OpenRouter model identifiers
COUNCIL_MODELS = [
"openai/gpt-5.1",
"openai/gpt-5.2",
"google/gemini-3-pro-preview",
"anthropic/claude-sonnet-4.5",
"x-ai/grok-4",
"anthropic/claude-opus-4.5",
"x-ai/grok-4.1-fast",
]

# Chairman model - synthesizes final response
Expand Down
34 changes: 24 additions & 10 deletions backend/council.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,7 @@
from typing import List, Dict, Any, Tuple
from .openrouter import query_models_parallel, query_model
from .config import COUNCIL_MODELS, CHAIRMAN_MODEL
from .tools import get_available_tools


async def stage1_collect_responses(user_query: str) -> List[Dict[str, Any]]:
Expand All @@ -13,21 +14,26 @@ async def stage1_collect_responses(user_query: str) -> List[Dict[str, Any]]:
user_query: The user's question

Returns:
List of dicts with 'model' and 'response' keys
List of dicts with 'model', 'response', and optional 'tool_calls_made' keys
"""
messages = [{"role": "user", "content": user_query}]
tools = get_available_tools()

# Query all models in parallel
responses = await query_models_parallel(COUNCIL_MODELS, messages)
# Query all models in parallel with tools
responses = await query_models_parallel(COUNCIL_MODELS, messages, tools=tools)

# Format results
stage1_results = []
for model, response in responses.items():
if response is not None: # Only include successful responses
stage1_results.append({
result = {
"model": model,
"response": response.get('content', '')
})
}
# Include tool calls if any were made
if response.get('tool_calls_made'):
result['tool_calls_made'] = response['tool_calls_made']
stage1_results.append(result)

return stage1_results

Expand Down Expand Up @@ -93,9 +99,10 @@ async def stage2_collect_rankings(
Now provide your evaluation and ranking:"""

messages = [{"role": "user", "content": ranking_prompt}]
tools = get_available_tools()

# Get rankings from all council models in parallel
responses = await query_models_parallel(COUNCIL_MODELS, messages)
# Get rankings from all council models in parallel with tools
responses = await query_models_parallel(COUNCIL_MODELS, messages, tools=tools)

# Format results
stage2_results = []
Expand Down Expand Up @@ -157,9 +164,10 @@ async def stage3_synthesize_final(
Provide a clear, well-reasoned final answer that represents the council's collective wisdom:"""

messages = [{"role": "user", "content": chairman_prompt}]
tools = get_available_tools()

# Query the chairman model
response = await query_model(CHAIRMAN_MODEL, messages)
# Query the chairman model with tools
response = await query_model(CHAIRMAN_MODEL, messages, tools=tools)

if response is None:
# Fallback if chairman fails
Expand All @@ -168,11 +176,17 @@ async def stage3_synthesize_final(
"response": "Error: Unable to generate final synthesis."
}

return {
result = {
"model": CHAIRMAN_MODEL,
"response": response.get('content', '')
}

# Include tool calls if chairman used any
if response.get('tool_calls_made'):
result['tool_calls_made'] = response['tool_calls_made']

return result


def parse_ranking_from_text(ranking_text: str) -> List[str]:
"""
Expand Down
119 changes: 97 additions & 22 deletions backend/openrouter.py
Original file line number Diff line number Diff line change
@@ -1,51 +1,124 @@
"""OpenRouter API client for making LLM requests."""

import json
import httpx
from typing import List, Dict, Any, Optional
from .config import OPENROUTER_API_KEY, OPENROUTER_API_URL


async def query_model(
model: str,
messages: List[Dict[str, str]],
timeout: float = 120.0
messages: List[Dict[str, Any]],
timeout: float = 120.0,
tools: Optional[List[Dict[str, Any]]] = None,
max_tool_iterations: int = 5
) -> Optional[Dict[str, Any]]:
"""
Query a single model via OpenRouter API.
Query a single model via OpenRouter API with optional tool support.

Args:
model: OpenRouter model identifier (e.g., "openai/gpt-4o")
messages: List of message dicts with 'role' and 'content'
timeout: Request timeout in seconds
tools: Optional list of tool definitions (OpenAI function calling format)
max_tool_iterations: Maximum number of tool call iterations to prevent infinite loops

Returns:
Response dict with 'content' and optional 'reasoning_details', or None if failed
Response dict with 'content', optional 'reasoning_details',
and optional 'tool_calls_made', or None if failed
"""
from .tools import execute_tool

headers = {
"Authorization": f"Bearer {OPENROUTER_API_KEY}",
"Content-Type": "application/json",
}

payload = {
"model": model,
"messages": messages,
}
# Work with a copy of messages to avoid mutating the original
current_messages = [msg.copy() for msg in messages]
tool_calls_made = []

try:
async with httpx.AsyncClient(timeout=timeout) as client:
response = await client.post(
OPENROUTER_API_URL,
headers=headers,
json=payload
)
response.raise_for_status()

data = response.json()
message = data['choices'][0]['message']

for _ in range(max_tool_iterations):
payload = {
"model": model,
"messages": current_messages,
}

# Add tools to payload if provided
if tools:
payload["tools"] = tools
payload["tool_choice"] = "auto"

response = await client.post(
OPENROUTER_API_URL,
headers=headers,
json=payload
)
response.raise_for_status()

data = response.json()
choice = data['choices'][0]
message = choice['message']
finish_reason = choice.get('finish_reason')

# Check if model wants to call tools
if message.get('tool_calls') or finish_reason == 'tool_calls':
tool_calls = message.get('tool_calls', [])

if not tool_calls:
# No tool calls despite finish_reason, return content
break

# Add assistant message with tool calls to conversation
current_messages.append(message)

# Execute each tool call
for tool_call in tool_calls:
tool_name = tool_call['function']['name']
try:
tool_args = json.loads(tool_call['function']['arguments'])
except json.JSONDecodeError:
tool_args = {}

# Execute the tool
result = await execute_tool(tool_name, tool_args)

# Track tool usage
tool_calls_made.append({
'tool': tool_name,
'args': tool_args,
'result': result
})

# Add tool result to messages
current_messages.append({
'role': 'tool',
'tool_call_id': tool_call['id'],
'content': json.dumps(result)
})

# Continue loop to get model's response with tool results
continue
else:
# Model returned regular content, we're done
result = {
'content': message.get('content'),
'reasoning_details': message.get('reasoning_details')
}

# Include tool calls made if any
if tool_calls_made:
result['tool_calls_made'] = tool_calls_made

return result

# Max iterations reached, return whatever content we have
return {
'content': message.get('content'),
'reasoning_details': message.get('reasoning_details')
'content': message.get('content', ''),
'reasoning_details': message.get('reasoning_details'),
'tool_calls_made': tool_calls_made if tool_calls_made else None
}

except Exception as e:
Expand All @@ -55,22 +128,24 @@ async def query_model(

async def query_models_parallel(
models: List[str],
messages: List[Dict[str, str]]
messages: List[Dict[str, Any]],
tools: Optional[List[Dict[str, Any]]] = None
) -> Dict[str, Optional[Dict[str, Any]]]:
"""
Query multiple models in parallel.

Args:
models: List of OpenRouter model identifiers
messages: List of message dicts to send to each model
tools: Optional list of tool definitions to pass to each model

Returns:
Dict mapping model identifier to response dict (or None if failed)
"""
import asyncio

# Create tasks for all models
tasks = [query_model(model, messages) for model in models]
tasks = [query_model(model, messages, tools=tools) for model in models]

# Wait for all to complete
responses = await asyncio.gather(*tasks)
Expand Down
103 changes: 103 additions & 0 deletions backend/tools.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,103 @@
"""Tool definitions and execution for LLM Council."""

from typing import Dict, Any, List
from .config import TAVILY_API_KEY, ENABLE_WEB_SEARCH

# Tavily web search tool definition (OpenAI function calling format)
TAVILY_SEARCH_TOOL = {
    "type": "function",
    "function": {
        "name": "web_search",  # must match the dispatch key in execute_tool
        "description": "Search the web for current information. Use this when the question requires up-to-date information, recent news, current events, or facts that may have changed recently.",
        "parameters": {
            "type": "object",
            "properties": {
                "query": {
                    "type": "string",
                    "description": "The search query to execute"
                },
                "max_results": {
                    "type": "integer",
                    "description": "Maximum number of results to return (1-10)",
                    "default": 5  # mirrors the execute_web_search default
                }
            },
            # query is the only required argument; max_results is optional
            "required": ["query"]
        }
    }
}


def get_available_tools() -> List[Dict[str, Any]]:
    """Return the tool definitions currently enabled by configuration.

    Web search is offered only when the ENABLE_WEB_SEARCH flag is on AND
    a Tavily API key is configured; otherwise an empty list is returned
    and models receive no tools.
    """
    if not (ENABLE_WEB_SEARCH and TAVILY_API_KEY):
        return []
    return [TAVILY_SEARCH_TOOL]


async def execute_tool(tool_name: str, tool_args: Dict[str, Any]) -> Dict[str, Any]:
    """
    Execute a tool by name with given arguments.

    Args:
        tool_name: Name of the tool to execute
        tool_args: Arguments to pass to the tool. These are parsed from
            model-generated JSON and may contain unexpected or missing keys.

    Returns:
        Tool execution result as a dict. Failures (unknown tool, bad
        arguments) are reported as {"error": ...} rather than raised, so
        a malformed tool call never aborts the surrounding model query.
    """
    if tool_name == "web_search":
        try:
            return await execute_web_search(**tool_args)
        except TypeError as e:
            # The model supplied argument keys that don't match the tool's
            # signature; surface it as a tool error instead of crashing.
            return {"error": f"Invalid arguments for {tool_name}: {e}"}
    return {"error": f"Unknown tool: {tool_name}"}


async def execute_web_search(
    query: str,
    max_results: int = 5
) -> Dict[str, Any]:
    """
    Run a Tavily web search and return model-friendly results.

    Args:
        query: The search query
        max_results: Maximum number of results (clamped into 1-10)

    Returns:
        Dict with 'query', 'answer', and 'results' on success; on any
        failure, a dict with a single 'error' key instead.
    """
    if not TAVILY_API_KEY:
        return {"error": "Tavily API key not configured"}

    try:
        from tavily import AsyncTavilyClient

        # Keep the requested result count inside Tavily's accepted range.
        capped = min(10, max(1, max_results))

        client = AsyncTavilyClient(api_key=TAVILY_API_KEY)
        response = await client.search(
            query=query,
            max_results=capped,
            include_answer=True
        )

        # Strip each hit down to the fields the models actually need.
        formatted = [
            {
                "title": hit.get("title", ""),
                "url": hit.get("url", ""),
                "content": hit.get("content", "")
            }
            for hit in response.get("results", [])
        ]

        return {
            "query": query,
            "answer": response.get("answer"),
            "results": formatted
        }

    except Exception as e:
        return {"error": f"Search failed: {str(e)}"}
Loading