Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
16 changes: 13 additions & 3 deletions .env.example
Original file line number Diff line number Diff line change
Expand Up @@ -31,13 +31,23 @@ REFRESH_INTERVAL_MINUTES=15

# === LLM Layer (optional) ===
# Enables AI-enhanced trade ideas and breaking news Telegram alerts.
# Provider options: anthropic | openai | gemini | codex | openrouter | minimax | mistral
# Provider options: anthropic | openai | gemini | codex | openrouter | minimax | mistral | ollama
LLM_PROVIDER=
# Not needed for codex (uses ~/.codex/auth.json)
# Not needed for codex (uses ~/.codex/auth.json) or ollama (local, no key required)
LLM_API_KEY=
# Optional override. Each provider has a sensible default:
# anthropic: claude-sonnet-4-6 | openai: gpt-5.4 | gemini: gemini-3.1-pro | codex: gpt-5.3-codex | openrouter: openrouter/auto | minimax: MiniMax-M2.5
# anthropic: claude-sonnet-4-6 | openai: gpt-5.4 | gemini: gemini-3.1-pro | codex: gpt-5.3-codex | openrouter: openrouter/auto | minimax: MiniMax-M2.5 | ollama: llama3.2
LLM_MODEL=
# Ollama only — base URL of your Ollama instance (default: http://localhost:11434)
OLLAMA_BASE_URL=

# === Localization (optional) ===
# Set UI language. Supported: en (English), fr (French), vi (Vietnamese)
# Trade ideas and LLM output will also be generated in the selected language.
CRUCIX_LANG=
# Translate dynamic content (news/OSINT headlines) via LLM when lang != en
# Requires LLM_PROVIDER to be configured. Default: true when lang is set.
CRUCIX_TRANSLATE=true

# === Telegram Alerts (optional, requires LLM) ===
# Create a bot via @BotFather, get chat ID via @userinfobot
Expand Down
4 changes: 4 additions & 0 deletions lib/llm/index.mjs
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,7 @@ import { GeminiProvider } from './gemini.mjs';
import { CodexProvider } from './codex.mjs';
import { MiniMaxProvider } from './minimax.mjs';
import { MistralProvider } from './mistral.mjs';
import { OllamaProvider } from './ollama.mjs';

export { LLMProvider } from './provider.mjs';
export { AnthropicProvider } from './anthropic.mjs';
Expand All @@ -16,6 +17,7 @@ export { GeminiProvider } from './gemini.mjs';
export { CodexProvider } from './codex.mjs';
export { MiniMaxProvider } from './minimax.mjs';
export { MistralProvider } from './mistral.mjs';
export { OllamaProvider } from './ollama.mjs';

/**
* Create an LLM provider based on config.
Expand All @@ -42,6 +44,8 @@ export function createLLMProvider(llmConfig) {
return new MiniMaxProvider({ apiKey, model });
case 'mistral':
return new MistralProvider({ apiKey, model });
case 'ollama':
return new OllamaProvider({ model, baseUrl: llmConfig.ollamaBaseUrl });
default:
console.warn(`[LLM] Unknown provider "${provider}". LLM features disabled.`);
return null;
Expand Down
53 changes: 53 additions & 0 deletions lib/llm/ollama.mjs
Original file line number Diff line number Diff line change
@@ -0,0 +1,53 @@
// Ollama Provider — local LLM via Ollama REST API (no API key required)
// Docs: https://github.com/ollama/ollama/blob/main/docs/api.md

import { LLMProvider } from './provider.mjs';

export class OllamaProvider extends LLMProvider {
  /**
   * Local LLM provider backed by the Ollama REST API. No API key is needed.
   *
   * @param {object} config
   * @param {string} [config.model] - Ollama model name (default: 'llama3.2').
   * @param {string} [config.baseUrl] - Base URL of the Ollama instance;
   *   falls back to OLLAMA_BASE_URL, then http://localhost:11434.
   */
  constructor(config) {
    super(config);
    this.name = 'ollama';
    this.model = config.model || 'llama3.2';
    // Strip ALL trailing slashes (the previous /\/$/ removed only one), so
    // `${this.baseUrl}/api/chat` never contains a double slash.
    this.baseUrl = (config.baseUrl || process.env.OLLAMA_BASE_URL || 'http://localhost:11434').replace(/\/+$/, '');
  }

  /** Ollama runs locally without credentials; configured whenever a model is set. */
  get isConfigured() { return !!this.model; }

  /**
   * Run a non-streaming chat completion against POST {baseUrl}/api/chat.
   *
   * @param {string} systemPrompt - Content of the system-role message.
   * @param {string} userMessage - Content of the user-role message.
   * @param {object} [opts]
   * @param {number} [opts.maxTokens=4096] - Mapped to Ollama's `num_predict` option.
   * @param {number} [opts.timeout=120000] - Abort after this many ms; generous
   *   default because local models can be slow.
   * @returns {Promise<{text: string, usage: {inputTokens: number, outputTokens: number}, model: string}>}
   * @throws {Error} On a non-2xx HTTP response (message carries status + body excerpt)
   *   or on timeout (AbortError from AbortSignal.timeout).
   */
  async complete(systemPrompt, userMessage, opts = {}) {
    const url = `${this.baseUrl}/api/chat`;

    const res = await fetch(url, {
      method: 'POST',
      headers: { 'Content-Type': 'application/json' },
      body: JSON.stringify({
        model: this.model,
        stream: false,
        options: {
          // ?? (not ||) so an explicit caller-supplied 0 is respected.
          num_predict: opts.maxTokens ?? 4096,
        },
        messages: [
          { role: 'system', content: systemPrompt },
          { role: 'user', content: userMessage },
        ],
      }),
      signal: AbortSignal.timeout(opts.timeout ?? 120000),
    });

    if (!res.ok) {
      // Best-effort body read for diagnostics; truncate to keep errors readable.
      const err = await res.text().catch(() => '');
      throw new Error(`Ollama API ${res.status}: ${err.substring(0, 200)}`);
    }

    const data = await res.json();
    const text = data.message?.content ?? '';

    return {
      text,
      usage: {
        // Ollama omits these counters on some responses; default to 0.
        inputTokens: data.prompt_eval_count ?? 0,
        outputTokens: data.eval_count ?? 0,
      },
      model: data.model || this.model,
    };
  }
}