@@ -110,7 +110,7 @@ export OPENAI_MODEL_NAME=meta-llama/llama-3.2-3b-instruct
 # Option 3: For local Ollama (default)
 export OPENAI_API_TYPE=ollama
 export OPENAI_API_BASE=http://localhost:11434/v1
-export OPENAI_MODEL_NAME=llama3.2:latest
+export OPENAI_MODEL_NAME=gemma3:12b-it-qat # VLM model for image description

 # Option 4: For LM Studio
 export OPENAI_API_TYPE=lmstudio
@@ -173,7 +173,7 @@ export OPENAI_MODEL_NAME=your-model
 ./setup_universal.sh
 ```

-#### Docker Installation (Recommended for Production)
+#### Docker Installation (Recommended for Production) (not yet tested)

 ``` bash
 # Clone repository
@@ -190,7 +190,7 @@ docker-compose up -d
 # Access at http://localhost:8080
 ```
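Since the Docker route above is flagged as not yet tested, it can be smoke-tested right after `docker-compose up -d`; this is a minimal sketch, assuming the compose file really publishes the web UI on port 8080 as the comment above says:

```bash
# List the compose services and confirm they are up
docker-compose ps

# The web UI should respond on port 8080; any 2xx/3xx status code is a good sign
curl -I http://localhost:8080
```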

-### Runpod Installation (For GPU Training)
+### Runpod Installation (For Simple Online GPU Training)

 ** Runpod Template:**
 ```
@@ -224,7 +224,7 @@ export OPENAI_MODEL_NAME=gpt-4
 export OPENAI_API_TYPE=openai
 export OPENAI_API_BASE=https://api.openai.com/v1
 export OPENAI_API_KEY=sk-...your-key...
-export OPENAI_MODEL_NAME=gpt-4 # or gpt-3.5-turbo
+export OPENAI_MODEL_NAME=gpt-5-mini # or gpt-4
 ```
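The OpenAI credentials set above can be verified with a single request before starting any run; this is a minimal sketch against the standard `/v1/models` endpoint, assuming `OPENAI_API_KEY` is exported in the current shell:

```bash
# List the models visible to this key; an HTTP 401 means the key is invalid
curl -s https://api.openai.com/v1/models \
  -H "Authorization: Bearer $OPENAI_API_KEY" | head -n 20
```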

 #### Using OpenRouter
@@ -240,12 +240,12 @@ export OPENAI_MODEL_NAME=meta-llama/llama-3.2-3b-instruct
 ``` bash
 # First install Ollama
 curl -fsSL https://ollama.com/install.sh | sh
-ollama pull llama3.2
+ollama pull gemma3:12b-it-qat

 # Configure OpenTuneWeaver
 export OPENAI_API_TYPE=ollama
 export OPENAI_API_BASE=http://localhost:11434/v1
-export OPENAI_MODEL_NAME=llama3.2:latest
+export OPENAI_MODEL_NAME=gemma3:12b-it-qat
 ```
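Because the configuration above points OpenTuneWeaver at Ollama's OpenAI-compatible endpoint, that endpoint can be queried directly as a sanity check; a minimal sketch, assuming Ollama is running on the default port 11434 and the gemma3 pull has finished:

```bash
# One round trip through Ollama's OpenAI-compatible chat endpoint
curl http://localhost:11434/v1/chat/completions \
  -H "Content-Type: application/json" \
  -d '{"model": "gemma3:12b-it-qat", "messages": [{"role": "user", "content": "Reply with ok."}]}'
```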

 #### Using LM Studio