8 changes: 5 additions & 3 deletions .env.example
@@ -31,13 +31,15 @@ REFRESH_INTERVAL_MINUTES=15
 
 # === LLM Layer (optional) ===
 # Enables AI-enhanced trade ideas and breaking news Telegram alerts.
-# Provider options: anthropic | openai | gemini | codex | openrouter | minimax | mistral
+# Provider options: anthropic | openai | gemini | codex | openrouter | minimax | mistral | ollama
 LLM_PROVIDER=
-# Not needed for codex (uses ~/.codex/auth.json)
+# Not needed for codex (uses ~/.codex/auth.json) or ollama (local)
 LLM_API_KEY=
 # Optional override. Each provider has a sensible default:
-# anthropic: claude-sonnet-4-6 | openai: gpt-5.4 | gemini: gemini-3.1-pro | codex: gpt-5.3-codex | openrouter: openrouter/auto | minimax: MiniMax-M2.5
+# anthropic: claude-sonnet-4-6 | openai: gpt-5.4 | gemini: gemini-3.1-pro | codex: gpt-5.3-codex | openrouter: openrouter/auto | minimax: MiniMax-M2.5 | ollama: llama3.1:8b
 LLM_MODEL=
+# Ollama base URL (only needed if not using default http://localhost:11434)
+OLLAMA_BASE_URL=
 
 # === Telegram Alerts (optional, requires LLM) ===
 # Create a bot via @BotFather, get chat ID via @userinfobot
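For reference, a filled-in sketch of the new variables for a local-only setup. Values are illustrative: llama3.1:8b is the documented default, and the base URL line is only needed when Ollama runs on another host (e.g. the gpu-box host used in the tests below).

LLM_PROVIDER=ollama
LLM_API_KEY=
LLM_MODEL=llama3.1:8b
# OLLAMA_BASE_URL=http://gpu-box:11434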
7 changes: 4 additions & 3 deletions crucix.config.mjs
@@ -1,15 +1,16 @@
 // Crucix Configuration — all settings with env var overrides
 
-import './apis/utils/env.mjs'; // Load .env first
+import "./apis/utils/env.mjs"; // Load .env first
 
 export default {
   port: parseInt(process.env.PORT) || 3117,
   refreshIntervalMinutes: parseInt(process.env.REFRESH_INTERVAL_MINUTES) || 15,
 
   llm: {
-    provider: process.env.LLM_PROVIDER || null, // anthropic | openai | gemini | codex | openrouter | minimax | mistral
+    provider: process.env.LLM_PROVIDER || null, // anthropic | openai | gemini | codex | openrouter | minimax | mistral | ollama
     apiKey: process.env.LLM_API_KEY || null,
     model: process.env.LLM_MODEL || null,
+    baseUrl: process.env.OLLAMA_BASE_URL || null,
   },
 
   telegram: {
@@ -22,7 +23,7 @@ export default {
   discord: {
     botToken: process.env.DISCORD_BOT_TOKEN || null,
     channelId: process.env.DISCORD_CHANNEL_ID || null,
-    guildId: process.env.DISCORD_GUILD_ID || null, // Server ID (for instant slash command registration)
+    guildId: process.env.DISCORD_GUILD_ID || null, // Server ID (for instant slash command registration)
     webhookUrl: process.env.DISCORD_WEBHOOK_URL || null, // Fallback: webhook-only alerts (no bot needed)
   },
 
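Taken together with the .env entries above: every key here is an env-var override falling back to null, with provider-level defaults applied later inside the providers themselves. A quick illustrative check (hypothetical values, assuming LLM_PROVIDER=ollama and OLLAMA_BASE_URL left unset):

import config from "./crucix.config.mjs";

console.log(config.llm.provider); // "ollama"
console.log(config.llm.baseUrl);  // null — OllamaProvider falls back to http://localhost:11434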
52 changes: 29 additions & 23 deletions lib/llm/index.mjs
@@ -1,21 +1,23 @@
 // LLM Factory — creates the configured provider or returns null
 
-import { AnthropicProvider } from './anthropic.mjs';
-import { OpenAIProvider } from './openai.mjs';
-import { OpenRouterProvider } from './openrouter.mjs';
-import { GeminiProvider } from './gemini.mjs';
-import { CodexProvider } from './codex.mjs';
-import { MiniMaxProvider } from './minimax.mjs';
-import { MistralProvider } from './mistral.mjs';
+import { AnthropicProvider } from "./anthropic.mjs";
+import { OpenAIProvider } from "./openai.mjs";
+import { OpenRouterProvider } from "./openrouter.mjs";
+import { GeminiProvider } from "./gemini.mjs";
+import { CodexProvider } from "./codex.mjs";
+import { MiniMaxProvider } from "./minimax.mjs";
+import { MistralProvider } from "./mistral.mjs";
+import { OllamaProvider } from "./ollama.mjs";
 
-export { LLMProvider } from './provider.mjs';
-export { AnthropicProvider } from './anthropic.mjs';
-export { OpenAIProvider } from './openai.mjs';
-export { OpenRouterProvider } from './openrouter.mjs';
-export { GeminiProvider } from './gemini.mjs';
-export { CodexProvider } from './codex.mjs';
-export { MiniMaxProvider } from './minimax.mjs';
-export { MistralProvider } from './mistral.mjs';
+export { LLMProvider } from "./provider.mjs";
+export { AnthropicProvider } from "./anthropic.mjs";
+export { OpenAIProvider } from "./openai.mjs";
+export { OpenRouterProvider } from "./openrouter.mjs";
+export { GeminiProvider } from "./gemini.mjs";
+export { CodexProvider } from "./codex.mjs";
+export { MiniMaxProvider } from "./minimax.mjs";
+export { MistralProvider } from "./mistral.mjs";
+export { OllamaProvider } from "./ollama.mjs";
 
 /**
  * Create an LLM provider based on config.
@@ -28,22 +30,26 @@ export function createLLMProvider(llmConfig) {
   const { provider, apiKey, model } = llmConfig;
 
   switch (provider.toLowerCase()) {
-    case 'anthropic':
+    case "anthropic":
       return new AnthropicProvider({ apiKey, model });
-    case 'openai':
+    case "openai":
       return new OpenAIProvider({ apiKey, model });
-    case 'openrouter':
+    case "openrouter":
       return new OpenRouterProvider({ apiKey, model });
-    case 'gemini':
+    case "gemini":
      return new GeminiProvider({ apiKey, model });
-    case 'codex':
+    case "codex":
       return new CodexProvider({ model });
-    case 'minimax':
+    case "minimax":
       return new MiniMaxProvider({ apiKey, model });
-    case 'mistral':
+    case "mistral":
       return new MistralProvider({ apiKey, model });
+    case "ollama":
+      return new OllamaProvider({ model, baseUrl: llmConfig.baseUrl });
     default:
-      console.warn(`[LLM] Unknown provider "${provider}". LLM features disabled.`);
+      console.warn(
+        `[LLM] Unknown provider "${provider}". LLM features disabled.`,
+      );
       return null;
   }
 }
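A minimal wiring sketch for the new branch, assuming the factory's guard for a missing provider (outside this hunk, per the "or returns null" docblock) yields null when LLM_PROVIDER is unset:

import config from "./crucix.config.mjs";
import { createLLMProvider } from "./lib/llm/index.mjs";

const llm = createLLMProvider(config.llm); // provider, apiKey, model, baseUrl
if (llm) {
  const { text, usage } = await llm.complete("You are terse.", "ping");
  console.log(text, usage);
}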
49 changes: 49 additions & 0 deletions lib/llm/ollama.mjs
@@ -0,0 +1,49 @@
// Ollama Provider — raw fetch, no SDK
// Uses Ollama's OpenAI-compatible Chat Completions API
// No API key required — fully local inference

import { LLMProvider } from './provider.mjs';

export class OllamaProvider extends LLMProvider {
  constructor(config) {
    super(config);
    this.name = 'ollama';
    this.baseUrl = (config.baseUrl || 'http://localhost:11434').replace(/\/+$/, '');
    this.model = config.model || 'llama3.1:8b';
  }

  get isConfigured() { return !!this.model; }

  async complete(systemPrompt, userMessage, opts = {}) {
    const res = await fetch(`${this.baseUrl}/v1/chat/completions`, {
      method: 'POST',
      headers: { 'Content-Type': 'application/json' },
      body: JSON.stringify({
        model: this.model,
        max_tokens: opts.maxTokens || 4096,
        messages: [
          { role: 'system', content: systemPrompt },
          { role: 'user', content: userMessage },
        ],
      }),
      signal: AbortSignal.timeout(opts.timeout || 120000),
    });

    if (!res.ok) {
      const err = await res.text().catch(() => '');
      throw new Error(`Ollama API ${res.status}: ${err.substring(0, 200)}`);
    }

    const data = await res.json();
    const text = data.choices?.[0]?.message?.content || '';

    return {
      text,
      usage: {
        inputTokens: data.usage?.prompt_tokens || 0,
        outputTokens: data.usage?.completion_tokens || 0,
      },
      model: data.model || this.model,
    };
  }
}
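A standalone usage sketch (assumes a local Ollama daemon with llama3.1:8b already pulled; prompts and options are illustrative):

import { OllamaProvider } from './lib/llm/ollama.mjs';

const provider = new OllamaProvider({}); // defaults: llama3.1:8b at http://localhost:11434
const result = await provider.complete(
  'You are a concise assistant.',
  'Say hello in five words.',
  { maxTokens: 128, timeout: 60000 },
);
console.log(result.text, result.usage);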
36 changes: 36 additions & 0 deletions test/llm-ollama-integration.test.mjs
@@ -0,0 +1,36 @@
// Ollama provider — integration test (calls real Ollama instance)
// Requires a running Ollama server with a model pulled
// Run: OLLAMA_MODEL=llama3.1:8b node --test test/llm-ollama-integration.test.mjs

import { describe, it } from 'node:test';
import assert from 'node:assert/strict';
import { OllamaProvider } from '../lib/llm/ollama.mjs';

const BASE_URL = process.env.OLLAMA_BASE_URL || 'http://localhost:11434';
const MODEL = process.env.OLLAMA_MODEL || 'llama3.1:8b';

// Check if Ollama is reachable before running tests
let ollamaAvailable = false;
try {
  const res = await fetch(`${BASE_URL}/api/tags`, { signal: AbortSignal.timeout(3000) });
  ollamaAvailable = res.ok;
} catch { /* not available */ }

describe('Ollama integration', { skip: !ollamaAvailable && 'Ollama not reachable' }, () => {
  it('should complete a prompt via local Ollama', async () => {
    const provider = new OllamaProvider({ model: MODEL, baseUrl: BASE_URL });
    assert.equal(provider.isConfigured, true);

    const result = await provider.complete(
      'You are a helpful assistant. Respond in exactly one sentence.',
      'What is 2+2?',
      { maxTokens: 128, timeout: 60000 }
    );

    assert.ok(result.text.length > 0, 'Response text should not be empty');
    assert.ok(result.model, 'Should report model name');
    console.log(`  Response: ${result.text}`);
    console.log(`  Tokens: ${result.usage.inputTokens} in / ${result.usage.outputTokens} out`);
    console.log(`  Model: ${result.model}`);
  });
});
170 changes: 170 additions & 0 deletions test/llm-ollama.test.mjs
@@ -0,0 +1,170 @@
// Ollama provider — unit tests
// Uses Node.js built-in test runner (node:test) — no extra dependencies

import { describe, it, mock, beforeEach } from 'node:test';
import assert from 'node:assert/strict';
import { OllamaProvider } from '../lib/llm/ollama.mjs';
import { createLLMProvider } from '../lib/llm/index.mjs';

// ─── Unit Tests ───

describe('OllamaProvider', () => {
  it('should set defaults correctly', () => {
    const provider = new OllamaProvider({});
    assert.equal(provider.name, 'ollama');
    assert.equal(provider.model, 'llama3.1:8b');
    assert.equal(provider.baseUrl, 'http://localhost:11434');
    assert.equal(provider.isConfigured, true);
  });

  it('should accept custom model and base URL', () => {
    const provider = new OllamaProvider({ model: 'qwen2.5:14b', baseUrl: 'http://192.168.1.10:11434' });
    assert.equal(provider.model, 'qwen2.5:14b');
    assert.equal(provider.baseUrl, 'http://192.168.1.10:11434');
  });

  it('should strip trailing slashes from base URL', () => {
    const provider = new OllamaProvider({ baseUrl: 'http://localhost:11434/' });
    assert.equal(provider.baseUrl, 'http://localhost:11434');
  });

  it('should throw on API error', async () => {
    const provider = new OllamaProvider({});
    const originalFetch = globalThis.fetch;
    globalThis.fetch = mock.fn(() =>
      Promise.resolve({ ok: false, status: 404, text: () => Promise.resolve('model not found') })
    );
    try {
      await assert.rejects(
        () => provider.complete('system', 'user'),
        (err) => {
          assert.match(err.message, /Ollama API 404/);
          return true;
        }
      );
    } finally {
      globalThis.fetch = originalFetch;
    }
  });

  it('should parse successful response', async () => {
    const provider = new OllamaProvider({});
    const mockResponse = {
      choices: [{ message: { content: 'Hello from Ollama' } }],
      usage: { prompt_tokens: 12, completion_tokens: 8 },
      model: 'llama3.1:8b',
    };
    const originalFetch = globalThis.fetch;
    globalThis.fetch = mock.fn(() =>
      Promise.resolve({ ok: true, json: () => Promise.resolve(mockResponse) })
    );
    try {
      const result = await provider.complete('You are helpful.', 'Say hello');
      assert.equal(result.text, 'Hello from Ollama');
      assert.equal(result.usage.inputTokens, 12);
      assert.equal(result.usage.outputTokens, 8);
      assert.equal(result.model, 'llama3.1:8b');
    } finally {
      globalThis.fetch = originalFetch;
    }
  });

  it('should send correct request format', async () => {
    const provider = new OllamaProvider({ model: 'qwen2.5:14b', baseUrl: 'http://myhost:11434' });
    let capturedUrl, capturedOpts;
    const originalFetch = globalThis.fetch;
    globalThis.fetch = mock.fn((url, opts) => {
      capturedUrl = url;
      capturedOpts = opts;
      return Promise.resolve({
        ok: true,
        json: () => Promise.resolve({
          choices: [{ message: { content: 'ok' } }],
          usage: { prompt_tokens: 1, completion_tokens: 1 },
          model: 'qwen2.5:14b',
        }),
      });
    });
    try {
      await provider.complete('system prompt', 'user message', { maxTokens: 2048 });
      assert.equal(capturedUrl, 'http://myhost:11434/v1/chat/completions');
      assert.equal(capturedOpts.method, 'POST');
      const headers = capturedOpts.headers;
      assert.equal(headers['Content-Type'], 'application/json');
      assert.equal(headers['Authorization'], undefined);
      const body = JSON.parse(capturedOpts.body);
      assert.equal(body.model, 'qwen2.5:14b');
      assert.equal(body.max_tokens, 2048);
      assert.equal(body.messages[0].role, 'system');
      assert.equal(body.messages[0].content, 'system prompt');
      assert.equal(body.messages[1].role, 'user');
      assert.equal(body.messages[1].content, 'user message');
    } finally {
      globalThis.fetch = originalFetch;
    }
  });

  it('should handle empty response gracefully', async () => {
    const provider = new OllamaProvider({});
    const originalFetch = globalThis.fetch;
    globalThis.fetch = mock.fn(() =>
      Promise.resolve({
        ok: true,
        json: () => Promise.resolve({ choices: [], usage: {} }),
      })
    );
    try {
      const result = await provider.complete('sys', 'user');
      assert.equal(result.text, '');
      assert.equal(result.usage.inputTokens, 0);
      assert.equal(result.usage.outputTokens, 0);
    } finally {
      globalThis.fetch = originalFetch;
    }
  });

  it('should use longer default timeout than cloud providers', async () => {
    const provider = new OllamaProvider({});
    let capturedOpts;
    const originalFetch = globalThis.fetch;
    globalThis.fetch = mock.fn((url, opts) => {
      capturedOpts = opts;
      return Promise.resolve({
        ok: true,
        json: () => Promise.resolve({
          choices: [{ message: { content: 'ok' } }],
          usage: { prompt_tokens: 1, completion_tokens: 1 },
        }),
      });
    });
    try {
      await provider.complete('sys', 'user');
      assert.ok(capturedOpts.signal, 'Should have an abort signal');
    } finally {
      globalThis.fetch = originalFetch;
    }
  });
});

// ─── Factory Tests ───

describe('createLLMProvider — ollama', () => {
  it('should create OllamaProvider for provider=ollama', () => {
    const provider = createLLMProvider({ provider: 'ollama', apiKey: null, model: null });
    assert.ok(provider instanceof OllamaProvider);
    assert.equal(provider.name, 'ollama');
    assert.equal(provider.isConfigured, true);
  });

  it('should be case-insensitive', () => {
    const provider = createLLMProvider({ provider: 'Ollama', apiKey: null, model: null });
    assert.ok(provider instanceof OllamaProvider);
  });

  it('should pass baseUrl from config', () => {
    const provider = createLLMProvider({ provider: 'ollama', apiKey: null, model: 'mistral:7b', baseUrl: 'http://gpu-box:11434' });
    assert.ok(provider instanceof OllamaProvider);
    assert.equal(provider.baseUrl, 'http://gpu-box:11434');
    assert.equal(provider.model, 'mistral:7b');
  });
});