diff --git a/electron/LLMHelper.ts b/electron/LLMHelper.ts
index 5edd592b..2694d889 100644
--- a/electron/LLMHelper.ts
+++ b/electron/LLMHelper.ts
@@ -25,7 +25,7 @@ export class LLMHelper {
       this.initializeOllamaModel()
     } else if (apiKey) {
       const genAI = new GoogleGenerativeAI(apiKey)
-      this.model = genAI.getGenerativeModel({ model: "gemini-2.0-flash" })
+      this.model = genAI.getGenerativeModel({ model: "gemini-2.5-flash" })
       console.log("[LLMHelper] Using Google Gemini")
     } else {
       throw new Error("Either provide Gemini API key or enable Ollama mode")
@@ -298,7 +298,7 @@ export class LLMHelper {
   }
 
   public getCurrentModel(): string {
-    return this.useOllama ? this.ollamaModel : "gemini-2.0-flash";
+    return this.useOllama ? this.ollamaModel : "gemini-2.5-flash";
   }
 
   public async switchToOllama(model?: string, url?: string): Promise {
@@ -318,7 +318,7 @@
   public async switchToGemini(apiKey?: string): Promise {
     if (apiKey) {
       const genAI = new GoogleGenerativeAI(apiKey);
-      this.model = genAI.getGenerativeModel({ model: "gemini-2.0-flash" });
+      this.model = genAI.getGenerativeModel({ model: "gemini-2.5-flash" });
     }
 
     if (!this.model && !apiKey) {
diff --git a/src/components/ui/ModelSelector.tsx b/src/components/ui/ModelSelector.tsx
index 49cf1d94..c6bb62d4 100644
--- a/src/components/ui/ModelSelector.tsx
+++ b/src/components/ui/ModelSelector.tsx
@@ -87,7 +87,7 @@ const ModelSelector: React.FC = ({ onModelChange, onChatOpen
     if (result.success) {
       await loadCurrentConfig();
       setConnectionStatus('success');
-      onModelChange?.(selectedProvider, selectedProvider === 'ollama' ? selectedOllamaModel : 'gemini-2.0-flash');
+      onModelChange?.(selectedProvider, selectedProvider === 'ollama' ? selectedOllamaModel : 'gemini-2.5-flash');
       // Auto-open chat window after successful model change
       setTimeout(() => {
         onChatOpen?.();
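
Note (hedged sketch, not part of the diff above): the "gemini-2.5-flash" literal now appears four times across two files, so the next model bump would mean repeating this same multi-site edit. A minimal TypeScript sketch of centralizing the id follows; the GEMINI_MODEL constant and the electron/config.ts module are assumptions for illustration, not code from this repository.

// electron/config.ts (hypothetical shared module)
// Single source of truth for the default Gemini model id.
export const GEMINI_MODEL = "gemini-2.5-flash";

// Call sites would then import the constant instead of the string literal, e.g.:
//   import { GEMINI_MODEL } from "./config"
//   this.model = genAI.getGenerativeModel({ model: GEMINI_MODEL })
//   return this.useOllama ? this.ollamaModel : GEMINI_MODEL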