24 changes: 24 additions & 0 deletions .docker/backend.Dockerfile
@@ -0,0 +1,24 @@
# Backend Dockerfile for LLM Council
FROM python:3.11-slim

# Install uv
RUN pip install --no-cache-dir uv

# Set working directory
WORKDIR /app

# Copy dependency files
COPY pyproject.toml uv.lock ./

# Install dependencies using uv
RUN uv sync --frozen

# Copy backend source code
COPY backend/ ./backend/

# Expose port
EXPOSE 8001

# Run the application
CMD ["uv", "run", "python", "-m", "backend.main"]

45 changes: 45 additions & 0 deletions .docker/frontend.Dockerfile
@@ -0,0 +1,45 @@
# Frontend Dockerfile for LLM Council
# Build stage
FROM node:22-alpine AS builder

WORKDIR /app

# Copy package files
COPY frontend/package.json frontend/package-lock.json ./

# Install dependencies
RUN npm ci

# Copy frontend source code
COPY frontend/ ./

# Build argument for API URL
ARG VITE_API_URL=http://backend:8001
ENV VITE_API_URL=$VITE_API_URL

# Build the application
RUN npm run build

# Production stage
FROM nginx:alpine

# Copy built files from builder stage
COPY --from=builder /app/dist /usr/share/nginx/html

# Copy nginx configuration for SPA
RUN echo 'server { \
listen 80; \
server_name _; \
root /usr/share/nginx/html; \
index index.html; \
location / { \
try_files $uri $uri/ /index.html; \
} \
}' > /etc/nginx/conf.d/default.conf

# Expose port
EXPOSE 80

# Start nginx
CMD ["nginx", "-g", "daemon off;"]

53 changes: 53 additions & 0 deletions .dockerignore
@@ -0,0 +1,53 @@
# Git
.git
.gitignore

# Python
__pycache__
*.pyc
*.pyo
*.pyd
.Python
*.so
*.egg
*.egg-info
dist
build
.venv
venv
env

# Node
node_modules
npm-debug.log
yarn-error.log

# IDE
.vscode
.idea
*.swp
*.swo
*~

# OS
.DS_Store
Thumbs.db

# Project specific
data/
.env
*.log

# Docker
.docker/
docker-compose.yml
Dockerfile*

# Documentation
*.md
!README.md

# Other
header.jpg
CLAUDE.md

152 changes: 152 additions & 0 deletions .env.example
@@ -0,0 +1,152 @@
# ================================================================================
# LLM COUNCIL - Environment Configuration
# ================================================================================
#
# SECURITY WARNING:
# - Never commit the actual .env file to version control
# - Keep your API keys secret and secure
# - The .env file is already listed in .gitignore
#
# SETUP INSTRUCTIONS:
# 1. Copy this file to .env: cp .env.example .env
# 2. Fill in your actual values in the .env file
# 3. Choose your provider mode and configure accordingly
#
# ================================================================================

# --------------------------------------------------------------------------------
# LLM PROVIDER MODE
# --------------------------------------------------------------------------------
# Determines which LLM provider(s) to use for the council
#
# Valid options:
# - openrouter: Use OpenRouter API for all models (cloud-based, requires API key)
# - ollama: Use local Ollama server for all models (self-hosted, free)
# - mixed: Use both providers with explicit prefixes per model
#
# Default: openrouter (for backward compatibility)
# --------------------------------------------------------------------------------
LLM_PROVIDER=openrouter


# --------------------------------------------------------------------------------
# OPENROUTER CONFIGURATION
# --------------------------------------------------------------------------------
# Required for: 'openrouter' and 'mixed' modes
# Get your API key from: https://openrouter.ai/keys
#
# SECURITY: This is a secret key - never share it or commit it to git!
# --------------------------------------------------------------------------------
OPENROUTER_API_KEY=your_openrouter_api_key_here


# --------------------------------------------------------------------------------
# OLLAMA CONFIGURATION
# --------------------------------------------------------------------------------
# Required for: 'ollama' and 'mixed' modes
# Default: http://localhost:11434 (standard Ollama installation)
#
# Change this if:
# - Running Ollama on a different port
# - Using a remote Ollama server
# - Using Docker with custom networking
# --------------------------------------------------------------------------------
OLLAMA_BASE_URL=http://localhost:11434


# ================================================================================
# CONFIGURATION EXAMPLES BY MODE
# ================================================================================
#
# The council models and chairman are configured in backend/config.py, but here
# are examples of how to set up each mode:
#
# --------------------------------------------------------------------------------
# EXAMPLE 1: OpenRouter Mode (Cloud-based)
# --------------------------------------------------------------------------------
# LLM_PROVIDER=openrouter
# OPENROUTER_API_KEY=sk-or-v1-your-actual-key-here
#
# In backend/config.py, use models like:
# COUNCIL_MODELS = [
# "openai/gpt-4",
# "openai/gpt-5.1",
# "google/gemini-3-pro-preview",
# "anthropic/claude-sonnet-4.5",
# "x-ai/grok-4"
# ]
# CHAIRMAN_MODEL = "google/gemini-3-pro-preview"
#
# Available OpenRouter models: https://openrouter.ai/models
#
# --------------------------------------------------------------------------------
# EXAMPLE 2: Ollama Mode (Local/Self-hosted)
# --------------------------------------------------------------------------------
# LLM_PROVIDER=ollama
# OLLAMA_BASE_URL=http://localhost:11434
#
# In backend/config.py, use models like:
# COUNCIL_MODELS = [
# "llama3.1:8b",
# "mistral:latest",
# "qwen2.5:3b",
# "phi3:latest"
# ]
# CHAIRMAN_MODEL = "llama3.1:8b"
#
# Note: You must have these models installed locally via:
# ollama pull llama3.1:8b
# ollama pull mistral:latest
# (etc.)
#
# Available Ollama models: https://ollama.ai/library
#
# --------------------------------------------------------------------------------
# EXAMPLE 3: Mixed Mode (Hybrid Cloud + Local)
# --------------------------------------------------------------------------------
# LLM_PROVIDER=mixed
# OPENROUTER_API_KEY=sk-or-v1-your-actual-key-here
# OLLAMA_BASE_URL=http://localhost:11434
#
# In backend/config.py, prefix each model with provider:
# COUNCIL_MODELS = [
# "ollama:llama3.1:8b", # Local model (fast, free)
# "ollama:mistral:latest", # Local model (fast, free)
# "openrouter:google/gemini-2.5-flash-lite", # Cloud model (paid)
# "openrouter:anthropic/claude-3.5-haiku" # Cloud model (paid)
# ]
# CHAIRMAN_MODEL = "openrouter:google/gemini-2.5-flash-lite"
#
# Benefits of mixed mode:
# - Use free local models for bulk processing
# - Use premium cloud models for final synthesis
# - Optimize cost vs quality trade-offs
#
# ================================================================================


# --------------------------------------------------------------------------------
# ADDITIONAL NOTES
# --------------------------------------------------------------------------------
#
# MODEL NAMING CONVENTIONS:
# - OpenRouter: Uses "provider/model-name" format (e.g., "openai/gpt-4")
# - Ollama: Uses "model-name:tag" format (e.g., "llama3.1:8b")
# - Mixed mode: Uses "provider:model-identifier" format
#
# COST CONSIDERATIONS:
# - OpenRouter charges per token (varies by model)
# - Ollama is free but requires local compute resources
# - Mixed mode allows cost optimization strategies
#
# PERFORMANCE:
# - OpenRouter: Fast API, no local setup required
# - Ollama: Speed depends on hardware, no network latency
# - Mixed mode: Balance based on your infrastructure
#
# PRIVACY:
# - OpenRouter: Data sent to third-party cloud services
# - Ollama: All processing happens locally (fully private)
# - Mixed mode: Be aware which models process sensitive data
#
# ================================================================================
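The mixed-mode examples and the "provider:model-identifier" naming convention above imply that the backend must split each council entry into a provider and a model ID before dispatching requests. That resolution logic lives in backend code not shown in this diff; the following is a minimal Python sketch of one way it could work. The names `resolve_model` and `DEFAULT_PROVIDER` are illustrative assumptions, not the project's actual API.

```python
# Hypothetical sketch (not part of this diff): resolving "provider:model" council
# entries as described in the mixed-mode examples above. resolve_model and
# DEFAULT_PROVIDER are assumed names for illustration only.
import os

# Falls back to the LLM_PROVIDER mode when an entry has no explicit prefix.
DEFAULT_PROVIDER = os.getenv("LLM_PROVIDER", "openrouter")


def resolve_model(entry: str) -> tuple[str, str]:
    """Split a council entry into (provider, model_id).

    "ollama:llama3.1:8b"        -> ("ollama", "llama3.1:8b")
    "openrouter:openai/gpt-4"   -> ("openrouter", "openai/gpt-4")
    "openai/gpt-4"  (no prefix) -> (DEFAULT_PROVIDER, "openai/gpt-4")
    """
    prefix, sep, rest = entry.partition(":")
    if sep and prefix in ("ollama", "openrouter"):
        return prefix, rest
    # No recognized prefix: treat the whole string as the model ID.
    return DEFAULT_PROVIDER, entry


if __name__ == "__main__":
    for m in ["ollama:llama3.1:8b", "openrouter:anthropic/claude-3.5-haiku", "openai/gpt-4"]:
        print(m, "->", resolve_model(m))
```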
9 changes: 8 additions & 1 deletion .gitignore
@@ -18,4 +18,11 @@ data/
# Frontend
frontend/node_modules/
frontend/dist/
frontend/.vite/
frontend/.vite/

#superclaude settings
.claude
.serena

#backlog.md files
backlog/