diff --git a/.env.example b/.env.example
index b44266f..1dcecf2 100644
--- a/.env.example
+++ b/.env.example
@@ -31,13 +31,23 @@ REFRESH_INTERVAL_MINUTES=15
 
 # === LLM Layer (optional) ===
 # Enables AI-enhanced trade ideas and breaking news Telegram alerts.
-# Provider options: anthropic | openai | gemini | codex | openrouter | minimax | mistral
+# Provider options: anthropic | openai | gemini | codex | openrouter | minimax | mistral | ollama
 LLM_PROVIDER=
-# Not needed for codex (uses ~/.codex/auth.json)
+# Not needed for codex (uses ~/.codex/auth.json) or ollama (local, no key required)
 LLM_API_KEY=
 # Optional override. Each provider has a sensible default:
-# anthropic: claude-sonnet-4-6 | openai: gpt-5.4 | gemini: gemini-3.1-pro | codex: gpt-5.3-codex | openrouter: openrouter/auto | minimax: MiniMax-M2.5
+# anthropic: claude-sonnet-4-6 | openai: gpt-5.4 | gemini: gemini-3.1-pro | codex: gpt-5.3-codex | openrouter: openrouter/auto | minimax: MiniMax-M2.5 | ollama: llama3.2
 LLM_MODEL=
+# Ollama only — base URL of your Ollama instance (default: http://localhost:11434)
+OLLAMA_BASE_URL=
+
+# === Localization (optional) ===
+# Set UI language. Supported: en (English), fr (French), vi (Vietnamese)
+# Trade ideas and LLM output will also be generated in the selected language.
+CRUCIX_LANG=
+# Translate dynamic content (news/OSINT headlines) via LLM when lang != en
+# Requires LLM_PROVIDER to be configured. Default: true when lang is set.
+CRUCIX_TRANSLATE=true
 
 # === Telegram Alerts (optional, requires LLM) ===
 # Create a bot via @BotFather, get chat ID via @userinfobot
diff --git a/lib/llm/index.mjs b/lib/llm/index.mjs
index b2d16ee..7d54b3a 100644
--- a/lib/llm/index.mjs
+++ b/lib/llm/index.mjs
@@ -7,6 +7,7 @@ import { GeminiProvider } from './gemini.mjs';
 import { CodexProvider } from './codex.mjs';
 import { MiniMaxProvider } from './minimax.mjs';
 import { MistralProvider } from './mistral.mjs';
+import { OllamaProvider } from './ollama.mjs';
 
 export { LLMProvider } from './provider.mjs';
 export { AnthropicProvider } from './anthropic.mjs';
@@ -16,6 +17,7 @@ export { GeminiProvider } from './gemini.mjs';
 export { CodexProvider } from './codex.mjs';
 export { MiniMaxProvider } from './minimax.mjs';
 export { MistralProvider } from './mistral.mjs';
+export { OllamaProvider } from './ollama.mjs';
 
 /**
  * Create an LLM provider based on config.
@@ -42,6 +44,8 @@ export function createLLMProvider(llmConfig) {
       return new MiniMaxProvider({ apiKey, model });
     case 'mistral':
       return new MistralProvider({ apiKey, model });
+    case 'ollama':
+      return new OllamaProvider({ model, baseUrl: llmConfig.ollamaBaseUrl });
     default:
       console.warn(`[LLM] Unknown provider "${provider}". LLM features disabled.`);
       return null;
diff --git a/lib/llm/ollama.mjs b/lib/llm/ollama.mjs
new file mode 100644
index 0000000..76926a8
--- /dev/null
+++ b/lib/llm/ollama.mjs
@@ -0,0 +1,53 @@
+// Ollama Provider — local LLM via Ollama REST API (no API key required)
+// Docs: https://github.com/ollama/ollama/blob/main/docs/api.md
+
+import { LLMProvider } from './provider.mjs';
+
+export class OllamaProvider extends LLMProvider {
+  constructor(config) {
+    super(config);
+    this.name = 'ollama';
+    this.model = config.model || 'llama3.2';
+    this.baseUrl = (config.baseUrl || process.env.OLLAMA_BASE_URL || 'http://localhost:11434').replace(/\/$/, '');
+  }
+
+  get isConfigured() { return !!this.model; }
+
+  async complete(systemPrompt, userMessage, opts = {}) {
+    const url = `${this.baseUrl}/api/chat`;
+
+    const res = await fetch(url, {
+      method: 'POST',
+      headers: { 'Content-Type': 'application/json' },
+      body: JSON.stringify({
+        model: this.model,
+        stream: false,
+        options: {
+          num_predict: opts.maxTokens || 4096,
+        },
+        messages: [
+          { role: 'system', content: systemPrompt },
+          { role: 'user', content: userMessage },
+        ],
+      }),
+      signal: AbortSignal.timeout(opts.timeout || 120000), // Ollama local can be slower
+    });
+
+    if (!res.ok) {
+      const err = await res.text().catch(() => '');
+      throw new Error(`Ollama API ${res.status}: ${err.substring(0, 200)}`);
+    }
+
+    const data = await res.json();
+    const text = data.message?.content || '';
+
+    return {
+      text,
+      usage: {
+        inputTokens: data.prompt_eval_count || 0,
+        outputTokens: data.eval_count || 0,
+      },
+      model: data.model || this.model,
+    };
+  }
+}
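
// --- Usage sketch (illustrative, not part of the diff above) ---
// A minimal smoke test for the new provider, assuming a local Ollama daemon
// is running with the default llama3.2 model pulled. The config keys mirror
// what createLLMProvider reads above; no apiKey is required for ollama.
import { createLLMProvider } from './lib/llm/index.mjs';

const llm = createLLMProvider({
  provider: 'ollama',
  model: 'llama3.2',                       // optional: provider default
  ollamaBaseUrl: 'http://localhost:11434', // optional: provider default
});

// complete() resolves to { text, usage, model } per OllamaProvider above.
const { text, usage, model } = await llm.complete(
  'You are a terse assistant.',
  'Reply with the single word: pong',
  { maxTokens: 32 },
);
console.log(model, usage, text);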