152 changes: 152 additions & 0 deletions .env.example
@@ -0,0 +1,152 @@
# ================================================================================
# LLM COUNCIL - Environment Configuration
# ================================================================================
#
# SECURITY WARNING:
# - Never commit the actual .env file to version control
# - Keep your API keys secret and secure
# - The .env file is already listed in .gitignore
#
# SETUP INSTRUCTIONS:
# 1. Copy this file to .env: cp .env.example .env
# 2. Fill in your actual values in the .env file
# 3. Choose your provider mode and configure accordingly
#
# ================================================================================

# --------------------------------------------------------------------------------
# LLM PROVIDER MODE
# --------------------------------------------------------------------------------
# Determines which LLM provider(s) to use for the council
#
# Valid options:
# - openrouter: Use OpenRouter API for all models (cloud-based, requires API key)
# - ollama: Use local Ollama server for all models (self-hosted, free)
# - mixed: Use both providers with explicit prefixes per model
#
# Default: openrouter (for backward compatibility)
# --------------------------------------------------------------------------------
LLM_PROVIDER=openrouter
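
# For reference, a minimal sketch of how the backend might read this value
# (illustrative only; the exact loading code in backend/config.py may differ,
# and the os.getenv default shown here is an assumption):
#
#   import os
#
#   LLM_PROVIDER = os.getenv("LLM_PROVIDER", "openrouter")
#   if LLM_PROVIDER not in ("openrouter", "ollama", "mixed"):
#       raise ValueError(f"Unsupported LLM_PROVIDER: {LLM_PROVIDER}")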


# --------------------------------------------------------------------------------
# OPENROUTER CONFIGURATION
# --------------------------------------------------------------------------------
# Required for: 'openrouter' and 'mixed' modes
# Get your API key from: https://openrouter.ai/keys
#
# SECURITY: This is a secret key - never share it or commit it to git!
# --------------------------------------------------------------------------------
OPENROUTER_API_KEY=your_openrouter_api_key_here
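
# For reference, OpenRouter exposes an OpenAI-compatible chat completions
# endpoint. A minimal sketch using the requests library (illustrative only;
# not necessarily how this project calls the API):
#
#   import os
#   import requests
#
#   resp = requests.post(
#       "https://openrouter.ai/api/v1/chat/completions",
#       headers={"Authorization": f"Bearer {os.getenv('OPENROUTER_API_KEY')}"},
#       json={
#           "model": "openai/gpt-4",  # any model listed at https://openrouter.ai/models
#           "messages": [{"role": "user", "content": "Hello"}],
#       },
#   )
#   print(resp.json()["choices"][0]["message"]["content"])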


# --------------------------------------------------------------------------------
# OLLAMA CONFIGURATION
# --------------------------------------------------------------------------------
# Required for: 'ollama' and 'mixed' modes
# Default: http://localhost:11434 (standard Ollama installation)
#
# Change this if:
# - Running Ollama on a different port
# - Using a remote Ollama server
# - Using Docker with custom networking
# --------------------------------------------------------------------------------
OLLAMA_BASE_URL=http://localhost:11434
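
# For reference, a minimal non-streaming request against Ollama's chat API
# (illustrative only; the project's actual client code may differ):
#
#   import os
#   import requests
#
#   base = os.getenv("OLLAMA_BASE_URL", "http://localhost:11434")
#   resp = requests.post(
#       f"{base}/api/chat",
#       json={
#           "model": "llama3.1:8b",  # must already be pulled locally
#           "messages": [{"role": "user", "content": "Hello"}],
#           "stream": False,
#       },
#   )
#   print(resp.json()["message"]["content"])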


# ================================================================================
# CONFIGURATION EXAMPLES BY MODE
# ================================================================================
#
# The council models and chairman are configured in backend/config.py, but here
# are examples of how to set up each mode:
#
# --------------------------------------------------------------------------------
# EXAMPLE 1: OpenRouter Mode (Cloud-based)
# --------------------------------------------------------------------------------
# LLM_PROVIDER=openrouter
# OPENROUTER_API_KEY=sk-or-v1-your-actual-key-here
#
# In backend/config.py, use models like:
# COUNCIL_MODELS = [
# "openai/gpt-4",
# "openai/gpt-5.1",
# "google/gemini-3-pro-preview",
# "anthropic/claude-sonnet-4.5",
# "x-ai/grok-4"
# ]
# CHAIRMAN_MODEL = "google/gemini-3-pro-preview"
#
# Available OpenRouter models: https://openrouter.ai/models
#
# --------------------------------------------------------------------------------
# EXAMPLE 2: Ollama Mode (Local/Self-hosted)
# --------------------------------------------------------------------------------
# LLM_PROVIDER=ollama
# OLLAMA_BASE_URL=http://localhost:11434
#
# In backend/config.py, use models like:
# COUNCIL_MODELS = [
# "llama3.1:8b",
# "mistral:latest",
# "qwen2.5:3b",
# "phi3:latest"
# ]
# CHAIRMAN_MODEL = "llama3.1:8b"
#
# Note: You must have these models installed locally via:
# ollama pull llama3.1:8b
# ollama pull mistral:latest
# (etc.)
#
# Available Ollama models: https://ollama.ai/library
#
# --------------------------------------------------------------------------------
# EXAMPLE 3: Mixed Mode (Hybrid Cloud + Local)
# --------------------------------------------------------------------------------
# LLM_PROVIDER=mixed
# OPENROUTER_API_KEY=sk-or-v1-your-actual-key-here
# OLLAMA_BASE_URL=http://localhost:11434
#
# In backend/config.py, prefix each model with provider:
# COUNCIL_MODELS = [
# "ollama:llama3.1:8b", # Local model (fast, free)
# "ollama:mistral:latest", # Local model (fast, free)
# "openrouter:google/gemini-2.5-flash-lite", # Cloud model (paid)
# "openrouter:anthropic/claude-3.5-haiku" # Cloud model (paid)
# ]
# CHAIRMAN_MODEL = "openrouter:google/gemini-2.5-flash-lite"
#
# Benefits of mixed mode:
# - Use free local models for bulk processing
# - Use premium cloud models for final synthesis
# - Optimize cost vs quality trade-offs
#
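# Since Ollama tags themselves contain colons (e.g., "llama3.1:8b"), the
# provider prefix should be split off only once. A minimal parsing sketch
# (illustrative only; the helper name parse_model is an assumption, not
# actual project code):
#
#   def parse_model(spec: str) -> tuple[str, str]:
#       provider, _, model = spec.partition(":")  # split on the first colon only
#       return provider, model  # "ollama:llama3.1:8b" -> ("ollama", "llama3.1:8b")
#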
# ================================================================================


# --------------------------------------------------------------------------------
# ADDITIONAL NOTES
# --------------------------------------------------------------------------------
#
# MODEL NAMING CONVENTIONS:
# - OpenRouter: Uses "provider/model-name" format (e.g., "openai/gpt-4")
# - Ollama: Uses "model-name:tag" format (e.g., "llama3.1:8b")
# - Mixed mode: Uses "provider:model-identifier" format
#
# COST CONSIDERATIONS:
# - OpenRouter charges per token (varies by model)
# - Ollama is free but requires local compute resources
# - Mixed mode allows cost optimization strategies
#
# PERFORMANCE:
# - OpenRouter: Fast API, no local setup required
# - Ollama: Speed depends on hardware, no network latency
# - Mixed mode: Balance based on your infrastructure
#
# PRIVACY:
# - OpenRouter: Data sent to third-party cloud services
# - Ollama: All processing happens locally (fully private)
# - Mixed mode: Be aware which models process sensitive data
#
# ================================================================================
9 changes: 8 additions & 1 deletion .gitignore
@@ -18,4 +18,11 @@ data/
# Frontend
frontend/node_modules/
frontend/dist/
frontend/.vite/

# superclaude settings
.claude
.serena

# backlog.md files
backlog/