-
Notifications
You must be signed in to change notification settings - Fork 28
Expand file tree
/
Copy path.env.example
More file actions
38 lines (31 loc) · 1.23 KB
/
.env.example
File metadata and controls
38 lines (31 loc) · 1.23 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
# Gateway Configuration
# This is the only port exposed externally
PORT=8080
DOCS_DIR=./data/docs # Local folder to mount into the container for ingestion
# LLM Provider Configuration
# Options: openai, ollama, anthropic, gemini, openai_compatible (for LM Studio, Llama.cpp, etc.)
LLM_PROVIDER=ollama
# Recommended for 8GB VRAM: llama3.2 (3b) or llama3 (8b)
# Note: Context window is limited to 8192 in code to prevent OOM
LLM_MODEL=llama3:latest
# Embedding Configuration
EMBEDDING_PROVIDER=ollama
EMBEDDING_MODEL=nomic-embed-text
# Ollama Configuration
# Using the ollama container from docker-compose
OLLAMA_HOST=http://ollama:11434
# OpenAI Compatible Server Configuration (for LM Studio, Llama.cpp, etc.)
# Uncomment and configure these when using LLM_PROVIDER=openai_compatible
# OPENAI_BASE_URL=http://localhost:1234/v1 # Example for LM Studio
# OPENAI_API_KEY=not-needed-for-local # Many local servers don't require an API key
# OpenAI/Anthropic/Gemini Configuration — uncomment and set only the key matching LLM_PROVIDER
# OPENAI_API_KEY=your_openai_api_key_here
# ANTHROPIC_API_KEY=your_anthropic_api_key_here
# GOOGLE_API_KEY=your_google_api_key_here
# Ingestion Configuration
INGEST_BATCH_SIZE=10
CHUNK_SIZE=512
CHUNK_OVERLAP=128
# Application Settings
DEBUG=false
LOG_LEVEL=INFO