Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
19 changes: 19 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -57,6 +57,25 @@ COUNCIL_MODELS = [
CHAIRMAN_MODEL = "google/gemini-3-pro-preview"
```

### 4. Custom Self-Hosted Models (Optional)

You can also add custom self-hosted models (e.g., Ollama, vLLM) by configuring `CUSTOM_MODELS` in `backend/config.py`:

```python
CUSTOM_MODELS = {
"ollama/llama3": {
"api_url": "http://localhost:11434/v1/chat/completions",
"api_key": "ollama" # Optional, defaults to "custom"
}
}

# Don't forget to add it to the council list!
COUNCIL_MODELS = [
# ... other models
"ollama/llama3",
]
```

## Running the Application

**Option 1: Use the start script**
Expand Down
12 changes: 11 additions & 1 deletion backend/config.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,9 +13,19 @@
"openai/gpt-5.1",
"google/gemini-3-pro-preview",
"anthropic/claude-sonnet-4.5",
"x-ai/grok-4",
"x-ai/grok-4"
]

# Registry of custom / self-hosted model endpoints (e.g. Ollama, vLLM), keyed
# by the model ID used elsewhere in the app.
# Format: { "model_id": { "api_url": "...", "api_key": "..." } }
# "api_key" is optional — query_model() falls back to the dummy value "custom"
# when it is omitted, since many local servers ignore the key but some HTTP
# clients still require an Authorization header.
# NOTE: adding an entry here is not enough by itself; the model ID must also be
# listed in COUNCIL_MODELS for it to be queried (see README).
CUSTOM_MODELS = {
    # Example for a local Ollama model:
    # "ollama/llama3": {
    #     "api_url": "http://localhost:11434/v1/chat/completions",
    #     "api_key": "ollama"  # often ignored by local servers, but required by some clients
    # }
}

# Chairman model — synthesizes the council's answers into the final response.
CHAIRMAN_MODEL = "google/gemini-3-pro-preview"

Expand Down
18 changes: 13 additions & 5 deletions backend/openrouter.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@

import httpx
from typing import List, Dict, Any, Optional
from .config import OPENROUTER_API_KEY, OPENROUTER_API_URL
from .config import OPENROUTER_API_KEY, OPENROUTER_API_URL, CUSTOM_MODELS


async def query_model(
Expand All @@ -11,18 +11,26 @@ async def query_model(
timeout: float = 120.0
) -> Optional[Dict[str, Any]]:
"""
Query a single model via OpenRouter API.
Query a single model via OpenRouter API or a custom endpoint.

Args:
model: OpenRouter model identifier (e.g., "openai/gpt-4o")
model: Model identifier (e.g., "openai/gpt-4o" or custom ID)
messages: List of message dicts with 'role' and 'content'
timeout: Request timeout in seconds

Returns:
Response dict with 'content' and optional 'reasoning_details', or None if failed
"""
if model in CUSTOM_MODELS:
config = CUSTOM_MODELS[model]
api_url = config["api_url"]
api_key = config.get("api_key", "custom") # Default to dummy key if not provided
else:
api_url = OPENROUTER_API_URL
api_key = OPENROUTER_API_KEY

headers = {
"Authorization": f"Bearer {OPENROUTER_API_KEY}",
"Authorization": f"Bearer {api_key}",
"Content-Type": "application/json",
}

Expand All @@ -34,7 +42,7 @@ async def query_model(
try:
async with httpx.AsyncClient(timeout=timeout) as client:
response = await client.post(
OPENROUTER_API_URL,
api_url,
headers=headers,
json=payload
)
Expand Down