diff --git a/electron/LLMHelper.ts b/electron/LLMHelper.ts index 5edd592b..b04d08fc 100644 --- a/electron/LLMHelper.ts +++ b/electron/LLMHelper.ts @@ -8,7 +8,25 @@ interface OllamaResponse { export class LLMHelper { private model: GenerativeModel | null = null - private readonly systemPrompt = `You are Wingman AI, a helpful, proactive assistant for any kind of problem or situation (not just coding). For any user input, analyze the situation, provide a clear problem statement, relevant context, and suggest several possible responses or actions the user could take next. Always explain your reasoning. Present your suggestions as a list of options or next steps.` + private readonly systemPrompt = `You're a real-time assistant that gives the user info during meetings and other workflows. Your goal is to answer the user's query directly. + +Responses must be EXTREMELY short and terse + +- Aim for 1-2 sentences, and if longer, use bullet points for structure +- Get straight to the point and NEVER add filler, preamble, or meta-comments +- Never give the user a direct script or word track to say; your responses must be informative +- Don't end with a question or prompt to the user +- If an example story is needed, give one specific example story without making up details +- If a response calls for code, write all code required with detailed comments and leave two blank lines between lines of code. 
+ +Tone must be natural, human, and conversational + +- Never be robotic or overly formal +- Use contractions naturally (“it's” not “it is”) +- Occasionally start with “And” or “But” or use a sentence fragment for flow +- NEVER use hyphens or dashes, split into shorter sentences or use commas +- Avoid unnecessary adjectives or dramatic emphasis unless it adds clear value` + private useOllama: boolean = false private ollamaModel: string = "llama3.2" private ollamaUrl: string = "http://localhost:11434" @@ -18,14 +36,12 @@ export class LLMHelper { if (useOllama) { this.ollamaUrl = ollamaUrl || "http://localhost:11434" - this.ollamaModel = ollamaModel || "gemma:latest" // Default fallback + this.ollamaModel = ollamaModel || "gemma:latest" console.log(`[LLMHelper] Using Ollama with model: ${this.ollamaModel}`) - - // Auto-detect and use first available model if specified model doesn't exist this.initializeOllamaModel() } else if (apiKey) { const genAI = new GoogleGenerativeAI(apiKey) - this.model = genAI.getGenerativeModel({ model: "gemini-2.0-flash" }) + this.model = genAI.getGenerativeModel({ model: "gemini-2.5-flash" }) console.log("[LLMHelper] Using Google Gemini") } else { throw new Error("Either provide Gemini API key or enable Ollama mode") @@ -43,28 +59,20 @@ export class LLMHelper { } private cleanJsonResponse(text: string): string { - // Remove markdown code block syntax if present - text = text.replace(/^```(?:json)?\n/, '').replace(/\n```$/, ''); - // Remove any leading/trailing whitespace - text = text.trim(); - return text; + text = text.replace(/^```(?:json)?\n/, '').replace(/\n```$/, '') + return text.trim() } private async callOllama(prompt: string): Promise { try { const response = await fetch(`${this.ollamaUrl}/api/generate`, { method: 'POST', - headers: { - 'Content-Type': 'application/json', - }, + headers: { 'Content-Type': 'application/json' }, body: JSON.stringify({ model: this.ollamaModel, prompt: prompt, stream: false, - options: { - 
temperature: 0.7, - top_p: 0.9, - } + options: { temperature: 0.7, top_p: 0.9 } }), }) @@ -74,7 +82,7 @@ export class LLMHelper { const data: OllamaResponse = await response.json() return data.response - } catch (error) { + } catch (error: any) { console.error("[LLMHelper] Error calling Ollama:", error) throw new Error(`Failed to connect to Ollama: ${error.message}. Make sure Ollama is running on ${this.ollamaUrl}`) } @@ -96,26 +104,21 @@ export class LLMHelper { console.warn("[LLMHelper] No Ollama models found") return } - - // Check if current model exists, if not use the first available if (!availableModels.includes(this.ollamaModel)) { this.ollamaModel = availableModels[0] console.log(`[LLMHelper] Auto-selected first available model: ${this.ollamaModel}`) } - - // Test the selected model works - const testResult = await this.callOllama("Hello") + await this.callOllama("Hello") console.log(`[LLMHelper] Successfully initialized with model: ${this.ollamaModel}`) - } catch (error) { + } catch (error: any) { console.error(`[LLMHelper] Failed to initialize Ollama model: ${error.message}`) - // Try to use first available model as fallback try { const models = await this.getOllamaModels() if (models.length > 0) { this.ollamaModel = models[0] console.log(`[LLMHelper] Fallback to: ${this.ollamaModel}`) } - } catch (fallbackError) { + } catch (fallbackError: any) { console.error(`[LLMHelper] Fallback also failed: ${fallbackError.message}`) } } @@ -124,14 +127,7 @@ export class LLMHelper { public async extractProblemFromImages(imagePaths: string[]) { try { const imageParts = await Promise.all(imagePaths.map(path => this.fileToGenerativePart(path))) - - const prompt = `${this.systemPrompt}\n\nYou are a wingman. 
Please analyze these images and extract the following information in JSON format:\n{ - "problem_statement": "A clear statement of the problem or situation depicted in the images.", - "context": "Relevant background or context from the images.", - "suggested_responses": ["First possible answer or action", "Second possible answer or action", "..."], - "reasoning": "Explanation of why these suggestions are appropriate." -}\nImportant: Return ONLY the JSON object, without any markdown formatting or code blocks.` - + const prompt = `${this.systemPrompt}\n\nThe images contain a problem or question. Extract the problem and provide a complete solution. For coding problems, provide working code as the final answer. For math/logic/MCQ questions, provide step-by-step solution and final answer.` const result = await this.model.generateContent([prompt, ...imageParts]) const response = await result.response const text = this.cleanJsonResponse(response.text()) @@ -143,45 +139,26 @@ export class LLMHelper { } public async generateSolution(problemInfo: any) { - const prompt = `${this.systemPrompt}\n\nGiven this problem or situation:\n${JSON.stringify(problemInfo, null, 2)}\n\nPlease provide your response in the following JSON format:\n{ - "solution": { - "code": "The code or main answer here.", - "problem_statement": "Restate the problem or situation.", - "context": "Relevant background/context.", - "suggested_responses": ["First possible answer or action", "Second possible answer or action", "..."], - "reasoning": "Explanation of why these suggestions are appropriate." - } -}\nImportant: Return ONLY the JSON object, without any markdown formatting or code blocks.` - - console.log("[LLMHelper] Calling Gemini LLM for solution..."); + const prompt = `${this.systemPrompt}\n\nGiven this problem or situation:\n${JSON.stringify(problemInfo, null, 2)}\n\nExtract the problem and provide a complete solution. For coding problems, provide working code as the final answer. 
For math/logic/MCQ questions, provide step-by-step solution and final answer.` + console.log("[LLMHelper] Calling Gemini LLM for solution...") try { const result = await this.model.generateContent(prompt) - console.log("[LLMHelper] Gemini LLM returned result."); + console.log("[LLMHelper] Gemini LLM returned result.") const response = await result.response const text = this.cleanJsonResponse(response.text()) const parsed = JSON.parse(text) console.log("[LLMHelper] Parsed LLM response:", parsed) return parsed } catch (error) { - console.error("[LLMHelper] Error in generateSolution:", error); - throw error; + console.error("[LLMHelper] Error in generateSolution:", error) + throw error } } public async debugSolutionWithImages(problemInfo: any, currentCode: string, debugImagePaths: string[]) { try { const imageParts = await Promise.all(debugImagePaths.map(path => this.fileToGenerativePart(path))) - - const prompt = `${this.systemPrompt}\n\nYou are a wingman. Given:\n1. The original problem or situation: ${JSON.stringify(problemInfo, null, 2)}\n2. The current response or approach: ${currentCode}\n3. The debug information in the provided images\n\nPlease analyze the debug information and provide feedback in this JSON format:\n{ - "solution": { - "code": "The code or main answer here.", - "problem_statement": "Restate the problem or situation.", - "context": "Relevant background/context.", - "suggested_responses": ["First possible answer or action", "Second possible answer or action", "..."], - "reasoning": "Explanation of why these suggestions are appropriate." - } -}\nImportant: Return ONLY the JSON object, without any markdown formatting or code blocks.` - + const prompt = `${this.systemPrompt}\n\nGiven:\n1. The original problem or situation: ${JSON.stringify(problemInfo, null, 2)}\n2. The current response or approach: ${currentCode}\n3. The debug information in the provided images\n\nExtract the problem and provide a complete solution. 
For coding problems, provide working code as the final answer. For math/logic/MCQ questions, provide step-by-step solution and final answer.` const result = await this.model.generateContent([prompt, ...imageParts]) const response = await result.response const text = this.cleanJsonResponse(response.text()) @@ -196,165 +173,129 @@ export class LLMHelper { public async analyzeAudioFile(audioPath: string) { try { - const audioData = await fs.promises.readFile(audioPath); - const audioPart = { - inlineData: { - data: audioData.toString("base64"), - mimeType: "audio/mp3" - } - }; - const prompt = `${this.systemPrompt}\n\nDescribe this audio clip in a short, concise answer. In addition to your main answer, suggest several possible actions or responses the user could take next based on the audio. Do not return a structured JSON object, just answer naturally as you would to a user.`; - const result = await this.model.generateContent([prompt, audioPart]); - const response = await result.response; - const text = response.text(); - return { text, timestamp: Date.now() }; + const audioData = await fs.promises.readFile(audioPath) + const audioPart = { inlineData: { data: audioData.toString("base64"), mimeType: "audio/mp3" } } + const prompt = `${this.systemPrompt}\n\nThe audio contains a problem or question. Extract the problem and provide a complete solution. For coding problems, provide working code as the final answer. 
For math/logic/MCQ questions, provide step-by-step solution and final answer.` + const result = await this.model.generateContent([prompt, audioPart]) + const response = await result.response + const text = response.text() + return { text, timestamp: Date.now() } } catch (error) { - console.error("Error analyzing audio file:", error); - throw error; + console.error("Error analyzing audio file:", error) + throw error } } public async analyzeAudioFromBase64(data: string, mimeType: string) { try { - const audioPart = { - inlineData: { - data, - mimeType - } - }; - const prompt = `${this.systemPrompt}\n\nDescribe this audio clip in a short, concise answer. In addition to your main answer, suggest several possible actions or responses the user could take next based on the audio. Do not return a structured JSON object, just answer naturally as you would to a user and be concise.`; - const result = await this.model.generateContent([prompt, audioPart]); - const response = await result.response; - const text = response.text(); - return { text, timestamp: Date.now() }; + const audioPart = { inlineData: { data, mimeType } } + const prompt = `${this.systemPrompt}\n\nThe audio contains a problem or question. Extract the problem and provide a complete solution. For coding problems, provide working code as the final answer. 
For math/logic/MCQ questions, provide step-by-step solution and final answer.` + const result = await this.model.generateContent([prompt, audioPart]) + const response = await result.response + const text = response.text() + return { text, timestamp: Date.now() } } catch (error) { - console.error("Error analyzing audio from base64:", error); - throw error; + console.error("Error analyzing audio from base64:", error) + throw error } } public async analyzeImageFile(imagePath: string) { try { - const imageData = await fs.promises.readFile(imagePath); - const imagePart = { - inlineData: { - data: imageData.toString("base64"), - mimeType: "image/png" - } - }; - const prompt = `${this.systemPrompt}\n\nDescribe the content of this image in a short, concise answer. In addition to your main answer, suggest several possible actions or responses the user could take next based on the image. Do not return a structured JSON object, just answer naturally as you would to a user. Be concise and brief.`; - const result = await this.model.generateContent([prompt, imagePart]); - const response = await result.response; - const text = response.text(); - return { text, timestamp: Date.now() }; + const imageData = await fs.promises.readFile(imagePath) + const imagePart = { inlineData: { data: imageData.toString("base64"), mimeType: "image/png" } } + const prompt = `${this.systemPrompt}\n\nThe image contains a problem or question. Extract the problem and provide a complete solution. For coding problems, provide working code as the final answer. 
For math/logic/MCQ questions, provide step-by-step solution and final answer.` + const result = await this.model.generateContent([prompt, imagePart]) + const response = await result.response + const text = response.text() + return { text, timestamp: Date.now() } } catch (error) { - console.error("Error analyzing image file:", error); - throw error; + console.error("Error analyzing image file:", error) + throw error } } public async chatWithGemini(message: string): Promise { try { if (this.useOllama) { - return this.callOllama(message); + return this.callOllama(message) } else if (this.model) { - const result = await this.model.generateContent(message); - const response = await result.response; - return response.text(); + const result = await this.model.generateContent(message) + const response = await result.response + return response.text() } else { - throw new Error("No LLM provider configured"); + throw new Error("No LLM provider configured") } } catch (error) { - console.error("[LLMHelper] Error in chatWithGemini:", error); - throw error; + console.error("[LLMHelper] Error in chatWithGemini:", error) + throw error } } public async chat(message: string): Promise { - return this.chatWithGemini(message); + return this.chatWithGemini(message) } public isUsingOllama(): boolean { - return this.useOllama; + return this.useOllama } public async getOllamaModels(): Promise { - if (!this.useOllama) return []; - + if (!this.useOllama) return [] try { - const response = await fetch(`${this.ollamaUrl}/api/tags`); - if (!response.ok) throw new Error('Failed to fetch models'); - - const data = await response.json(); - return data.models?.map((model: any) => model.name) || []; + const response = await fetch(`${this.ollamaUrl}/api/tags`) + if (!response.ok) throw new Error('Failed to fetch models') + const data = await response.json() + return data.models?.map((model: any) => model.name) || [] } catch (error) { - console.error("[LLMHelper] Error fetching Ollama models:", error); 
- return []; + console.error("[LLMHelper] Error fetching Ollama models:", error) + return [] } } public getCurrentProvider(): "ollama" | "gemini" { - return this.useOllama ? "ollama" : "gemini"; + return this.useOllama ? "ollama" : "gemini" } public getCurrentModel(): string { - return this.useOllama ? this.ollamaModel : "gemini-2.0-flash"; + return this.useOllama ? this.ollamaModel : "gemini-2.5-flash" } public async switchToOllama(model?: string, url?: string): Promise { - this.useOllama = true; - if (url) this.ollamaUrl = url; - - if (model) { - this.ollamaModel = model; - } else { - // Auto-detect first available model - await this.initializeOllamaModel(); - } - - console.log(`[LLMHelper] Switched to Ollama: ${this.ollamaModel} at ${this.ollamaUrl}`); + this.useOllama = true + if (url) this.ollamaUrl = url + if (model) this.ollamaModel = model + else await this.initializeOllamaModel() + console.log(`[LLMHelper] Switched to Ollama: ${this.ollamaModel} at ${this.ollamaUrl}`) } public async switchToGemini(apiKey?: string): Promise { if (apiKey) { - const genAI = new GoogleGenerativeAI(apiKey); - this.model = genAI.getGenerativeModel({ model: "gemini-2.0-flash" }); - } - - if (!this.model && !apiKey) { - throw new Error("No Gemini API key provided and no existing model instance"); + const genAI = new GoogleGenerativeAI(apiKey) + this.model = genAI.getGenerativeModel({ model: "gemini-2.5-flash" }) } - - this.useOllama = false; - console.log("[LLMHelper] Switched to Gemini"); + if (!this.model && !apiKey) throw new Error("No Gemini API key provided and no existing model instance") + this.useOllama = false + console.log("[LLMHelper] Switched to Gemini") } public async testConnection(): Promise<{ success: boolean; error?: string }> { try { if (this.useOllama) { - const available = await this.checkOllamaAvailable(); - if (!available) { - return { success: false, error: `Ollama not available at ${this.ollamaUrl}` }; - } - // Test with a simple prompt - await 
this.callOllama("Hello"); - return { success: true }; + const available = await this.checkOllamaAvailable() + if (!available) return { success: false, error: `Ollama not available at ${this.ollamaUrl}` } + await this.callOllama("Hello") + return { success: true } } else { - if (!this.model) { - return { success: false, error: "No Gemini model configured" }; - } - // Test with a simple prompt - const result = await this.model.generateContent("Hello"); - const response = await result.response; - const text = response.text(); // Ensure the response is valid - if (text) { - return { success: true }; - } else { - return { success: false, error: "Empty response from Gemini" }; - } + if (!this.model) return { success: false, error: "No Gemini model configured" } + const result = await this.model.generateContent("Hello") + const response = await result.response + const text = response.text() + if (text) return { success: true } + return { success: false, error: "Empty response from Gemini" } } - } catch (error) { - return { success: false, error: error.message }; + } catch (error: any) { + return { success: false, error: error.message } } } -} \ No newline at end of file +}