Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 2 additions & 2 deletions .env.example
Original file line number Diff line number Diff line change
Expand Up @@ -33,12 +33,12 @@ REFRESH_INTERVAL_MINUTES=15

# === LLM Layer (optional) ===
# Enables AI-enhanced trade ideas and breaking news Telegram alerts.
# Provider options: anthropic | openai | gemini | codex | openrouter | minimax | mistral | ollama
# Provider options: anthropic | openai | gemini | codex | openrouter | minimax | mistral | ollama | grok
LLM_PROVIDER=
# Not needed for codex (uses ~/.codex/auth.json) or ollama (local)
LLM_API_KEY=
# Optional override. Each provider has a sensible default:
# anthropic: claude-sonnet-4-6 | openai: gpt-5.4 | gemini: gemini-3.1-pro | codex: gpt-5.3-codex | openrouter: openrouter/auto | minimax: MiniMax-M2.5 | ollama: llama3.1:8b
# anthropic: claude-sonnet-4-6 | openai: gpt-5.4 | gemini: gemini-3.1-pro | codex: gpt-5.3-codex | openrouter: openrouter/auto | minimax: MiniMax-M2.5 | ollama: llama3.1:8b | grok: grok-4-latest
LLM_MODEL=
# Ollama base URL (only needed if not using default http://localhost:11434)
OLLAMA_BASE_URL=
Expand Down
12 changes: 7 additions & 5 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -186,10 +186,10 @@ Alerts are delivered as rich embeds with color-coded sidebars: red for FLASH, ye
**Optional dependency:** The full bot requires `discord.js`. Install it with `npm install discord.js`. If it's not installed, Crucix automatically falls back to webhook-only mode.

### Optional LLM Layer
Connect any of 6 LLM providers for enhanced analysis:
Connect any of 8 LLM providers for enhanced analysis:
- **AI trade ideas** — quantitative analyst producing 5-8 actionable ideas citing specific data
- **Smarter alert evaluation** — LLM classifies signals into FLASH/PRIORITY/ROUTINE tiers with cross-domain correlation and confidence scoring
- Providers: Anthropic Claude, OpenAI, Google Gemini, OpenRouter (Unified API), OpenAI Codex (ChatGPT subscription), MiniMax, Mistral
- Providers: Anthropic Claude, OpenAI, Google Gemini, OpenRouter (Unified API), OpenAI Codex (ChatGPT subscription), MiniMax, Mistral, Grok
- Graceful fallback — when LLM is unavailable, a rule-based engine takes over alert evaluation. LLM failures never crash the sweep cycle.

---
Expand Down Expand Up @@ -222,7 +222,7 @@ These three unlock the most valuable economic and satellite data. Each takes abo

### LLM Provider (optional, for AI-enhanced ideas)

Set `LLM_PROVIDER` to one of: `anthropic`, `openai`, `gemini`, `codex`, `openrouter`, `minimax`, `mistral`
Set `LLM_PROVIDER` to one of: `anthropic`, `openai`, `gemini`, `codex`, `openrouter`, `minimax`, `mistral`, `grok`

| Provider | Key Required | Default Model |
|----------|-------------|---------------|
Expand All @@ -233,6 +233,7 @@ Set `LLM_PROVIDER` to one of: `anthropic`, `openai`, `gemini`, `codex`, `openrou
| `codex` | None (uses `~/.codex/auth.json`) | gpt-5.3-codex |
| `minimax` | `LLM_API_KEY` | MiniMax-M2.5 |
| `mistral` | `LLM_API_KEY` | mistral-large-latest |
| `grok` | `LLM_API_KEY` | grok-4-latest |

For Codex, run `npx @openai/codex login` to authenticate via your ChatGPT subscription.

Expand Down Expand Up @@ -302,11 +303,12 @@ crucix/
│ └── jarvis.html # Self-contained Jarvis HUD
├── lib/
│ ├── llm/ # LLM abstraction (5 providers, raw fetch, no SDKs)
│ ├── llm/ # LLM abstraction (8 providers, raw fetch, no SDKs)
│ │ ├── provider.mjs # Base class
│ │ ├── anthropic.mjs # Claude
│ │ ├── openai.mjs # GPT
│ │ ├── gemini.mjs # Gemini
│ │ ├── grok.mjs # Grok
│ │ ├── openrouter.mjs # OpenRouter (Unified API)
│ │ ├── codex.mjs # Codex (ChatGPT subscription)
│ │ ├── minimax.mjs # MiniMax (M2.5, 204K context)
Expand Down Expand Up @@ -412,7 +414,7 @@ All settings are in `.env` with sensible defaults:
|----------|---------|-------------|
| `PORT` | `3117` | Dashboard server port |
| `REFRESH_INTERVAL_MINUTES` | `15` | Auto-refresh interval |
| `LLM_PROVIDER` | disabled | `anthropic`, `openai`, `gemini`, `codex`, `openrouter`, `minimax`, or `mistral` |
| `LLM_PROVIDER` | disabled | `anthropic`, `openai`, `gemini`, `codex`, `openrouter`, `minimax`, `mistral`, or `grok` |
| `LLM_API_KEY` | — | API key (not needed for codex) |
| `LLM_MODEL` | per-provider default | Override model selection |
| `TELEGRAM_BOT_TOKEN` | disabled | For Telegram alerts + bot commands |
Expand Down
2 changes: 1 addition & 1 deletion crucix.config.mjs
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,7 @@ export default {
refreshIntervalMinutes: parseInt(process.env.REFRESH_INTERVAL_MINUTES) || 15,

llm: {
provider: process.env.LLM_PROVIDER || null, // anthropic | openai | gemini | codex | openrouter | minimax | mistral | ollama
provider: process.env.LLM_PROVIDER || null, // anthropic | openai | gemini | codex | openrouter | minimax | mistral | ollama | grok
apiKey: process.env.LLM_API_KEY || null,
model: process.env.LLM_MODEL || null,
baseUrl: process.env.OLLAMA_BASE_URL || null,
Expand Down
54 changes: 54 additions & 0 deletions lib/llm/grok.mjs
Original file line number Diff line number Diff line change
@@ -0,0 +1,54 @@
// Grok Provider - raw fetch, no SDK

import { LLMProvider } from './provider.mjs';

export class GrokProvider extends LLMProvider {
  /**
   * Grok (xAI) chat-completion provider using the OpenAI-compatible
   * /v1/chat/completions endpoint via raw fetch — no SDK dependency.
   *
   * @param {object} config
   * @param {string} [config.apiKey] - xAI API key; required before calling complete().
   * @param {string} [config.model]  - Model override; defaults to 'grok-4-latest'.
   */
  constructor(config) {
    super(config);
    this.name = 'grok';
    this.apiKey = config.apiKey;
    this.model = config.model || 'grok-4-latest';
  }

  /** @returns {boolean} true when an API key is present. */
  get isConfigured() {
    return !!this.apiKey;
  }

  /**
   * Send a single system+user exchange to the xAI API.
   *
   * @param {string} systemPrompt - System role message.
   * @param {string} userMessage  - User role message.
   * @param {object} [opts]
   * @param {number} [opts.maxTokens=4096]   - Completion token cap.
   * @param {number} [opts.temperature=0]    - Sampling temperature (0 = deterministic).
   * @param {number} [opts.timeout=60000]    - Request timeout in milliseconds.
   * @returns {Promise<{text: string, usage: {inputTokens: number, outputTokens: number}, model: string}>}
   * @throws {Error} if no API key is configured, on non-2xx responses
   *                 (message includes status and a body excerpt), or on timeout.
   */
  async complete(systemPrompt, userMessage, opts = {}) {
    // Fail fast with an actionable message rather than sending
    // "Authorization: Bearer undefined" and surfacing an opaque 401.
    if (!this.isConfigured) {
      throw new Error('Grok provider is not configured: missing API key (LLM_API_KEY)');
    }

    const res = await fetch('https://api.x.ai/v1/chat/completions', {
      method: 'POST',
      headers: {
        'Content-Type': 'application/json',
        Authorization: `Bearer ${this.apiKey}`,
      },
      body: JSON.stringify({
        max_tokens: opts.maxTokens || 4096,
        messages: [
          { role: 'system', content: systemPrompt },
          { role: 'user', content: userMessage },
        ],
        model: this.model,
        stream: false,
        // ?? (not ||) so an explicit temperature of 0 is honored; default stays 0.
        temperature: opts.temperature ?? 0,
      }),
      signal: AbortSignal.timeout(opts.timeout || 60000),
    });

    if (!res.ok) {
      // Body text is best-effort; truncate so huge HTML error pages don't flood logs.
      const err = await res.text().catch(() => '');
      throw new Error(`Grok API ${res.status}: ${err.substring(0, 200)}`);
    }

    const data = await res.json();
    const text = data.choices?.[0]?.message?.content || '';

    return {
      text,
      usage: {
        inputTokens: data.usage?.prompt_tokens || 0,
        outputTokens: data.usage?.completion_tokens || 0,
      },
      model: data.model || this.model,
    };
  }
}
4 changes: 4 additions & 0 deletions lib/llm/index.mjs
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,7 @@ import { CodexProvider } from "./codex.mjs";
import { MiniMaxProvider } from "./minimax.mjs";
import { MistralProvider } from "./mistral.mjs";
import { OllamaProvider } from "./ollama.mjs";
import { GrokProvider } from "./grok.mjs";

export { LLMProvider } from "./provider.mjs";
export { AnthropicProvider } from "./anthropic.mjs";
Expand All @@ -18,6 +19,7 @@ export { CodexProvider } from "./codex.mjs";
export { MiniMaxProvider } from "./minimax.mjs";
export { MistralProvider } from "./mistral.mjs";
export { OllamaProvider } from "./ollama.mjs";
export { GrokProvider } from "./grok.mjs";

/**
* Create an LLM provider based on config.
Expand Down Expand Up @@ -46,6 +48,8 @@ export function createLLMProvider(llmConfig) {
return new MistralProvider({ apiKey, model });
case "ollama":
return new OllamaProvider({ model, baseUrl: llmConfig.baseUrl });
case 'grok':
return new GrokProvider({ apiKey, model });
default:
console.warn(
`[LLM] Unknown provider "${provider}". LLM features disabled.`,
Expand Down
133 changes: 133 additions & 0 deletions test/llm-grok.test.mjs
Original file line number Diff line number Diff line change
@@ -0,0 +1,133 @@
// Grok provider — unit tests
// Uses Node.js built-in test runner (node:test) — no extra dependencies

import { describe, it, mock } from 'node:test';
import assert from 'node:assert/strict';
import { GrokProvider } from '../lib/llm/grok.mjs';
import { createLLMProvider } from '../lib/llm/index.mjs';

// ─── Unit Tests ───

// Covers constructor defaults, configuration detection, HTTP error propagation,
// success-response parsing, outgoing request shape, and empty-response handling.
// Tests that hit the network path swap globalThis.fetch for a mock and restore
// the original in `finally` so a failing assertion cannot leak the mock.
describe('GrokProvider', () => {
  // Defaults: name 'grok', model 'grok-4-latest', configured once an apiKey is set.
  it('should set defaults correctly', () => {
    const provider = new GrokProvider({ apiKey: 'sk-test' });
    assert.equal(provider.name, 'grok');
    assert.equal(provider.model, 'grok-4-latest');
    assert.equal(provider.isConfigured, true);
  });

  // config.model overrides the default model string.
  it('should accept custom model', () => {
    const provider = new GrokProvider({ apiKey: 'sk-test', model: 'grok-2' });
    assert.equal(provider.model, 'grok-2');
  });

  // Without an apiKey the provider reports itself unusable.
  it('should report not configured without API key', () => {
    const provider = new GrokProvider({});
    assert.equal(provider.isConfigured, false);
  });

  // Non-2xx responses must surface as an Error carrying the HTTP status.
  it('should throw on API error', async () => {
    const provider = new GrokProvider({ apiKey: 'sk-test' });
    const originalFetch = globalThis.fetch;
    globalThis.fetch = mock.fn(() =>
      Promise.resolve({ ok: false, status: 401, text: () => Promise.resolve('Unauthorized') })
    );
    try {
      await assert.rejects(
        () => provider.complete('system', 'user'),
        (err) => {
          // Error message format: "Grok API <status>: <body excerpt>"
          assert.match(err.message, /Grok API 401/);
          return true;
        }
      );
    } finally {
      globalThis.fetch = originalFetch;
    }
  });

  // Happy path: text, token usage, and model are extracted from the
  // OpenAI-compatible response shape (choices[0].message.content, usage.*).
  it('should parse successful response', async () => {
    const provider = new GrokProvider({ apiKey: 'sk-test' });
    const mockResponse = {
      choices: [{ message: { content: 'Hello world' } }],
      usage: { prompt_tokens: 10, completion_tokens: 5 },
      model: 'grok-3'
    };
    const originalFetch = globalThis.fetch;
    globalThis.fetch = mock.fn(() =>
      Promise.resolve({ ok: true, json: () => Promise.resolve(mockResponse) })
    );
    try {
      const result = await provider.complete('system', 'user');
      assert.equal(result.text, 'Hello world');
      assert.equal(result.usage.inputTokens, 10);
      assert.equal(result.usage.outputTokens, 5);
      // The model reported by the API wins over the configured model.
      assert.equal(result.model, 'grok-3');
    } finally {
      globalThis.fetch = originalFetch;
    }
  });

  // Captures the outgoing fetch call and verifies URL, method, headers,
  // and JSON body fields (model, max_tokens, system/user message order).
  it('should send correct request format', async () => {
    const provider = new GrokProvider({ apiKey: 'sk-test-key', model: 'grok-4-latest' });
    let capturedUrl, capturedOpts;
    const originalFetch = globalThis.fetch;
    globalThis.fetch = mock.fn((url, opts) => {
      capturedUrl = url;
      capturedOpts = opts;
      return Promise.resolve({
        ok: true,
        json: () => Promise.resolve({
          choices: [{ message: { content: 'ok' } }],
          usage: { prompt_tokens: 1, completion_tokens: 1 },
          model: 'grok-4-latest',
        }),
      });
    });
    try {
      await provider.complete('system prompt', 'user message', { maxTokens: 2048 });
      assert.equal(capturedUrl, 'https://api.x.ai/v1/chat/completions');
      assert.equal(capturedOpts.method, 'POST');
      const headers = capturedOpts.headers;
      assert.equal(headers['Content-Type'], 'application/json');
      assert.equal(headers.Authorization, 'Bearer sk-test-key');
      const body = JSON.parse(capturedOpts.body);
      assert.equal(body.model, 'grok-4-latest');
      // opts.maxTokens must override the 4096 default.
      assert.equal(body.max_tokens, 2048);
      assert.equal(body.messages[0].role, 'system');
      assert.equal(body.messages[0].content, 'system prompt');
      assert.equal(body.messages[1].role, 'user');
      assert.equal(body.messages[1].content, 'user message');
    } finally {
      globalThis.fetch = originalFetch;
    }
  });

  // An empty choices array / missing usage must degrade to '' and zero
  // counts rather than throwing on undefined property access.
  it('should handle empty response gracefully', async () => {
    const provider = new GrokProvider({ apiKey: 'sk-test' });
    const originalFetch = globalThis.fetch;
    globalThis.fetch = mock.fn(() =>
      Promise.resolve({
        ok: true,
        json: () => Promise.resolve({ choices: [], usage: {} }),
      })
    );
    try {
      const result = await provider.complete('sys', 'user');
      assert.equal(result.text, '');
      assert.equal(result.usage.inputTokens, 0);
      assert.equal(result.usage.outputTokens, 0);
    } finally {
      globalThis.fetch = originalFetch;
    }
  });
});

// ─── Factory Tests ───

// The factory must map provider id 'grok' to a ready-to-use GrokProvider.
describe('createLLMProvider', () => {
  it('should create Grok provider', () => {
    const created = createLLMProvider({ provider: 'grok', apiKey: 'sk-test' });
    // Instance type and configured state both confirm correct wiring.
    assert.ok(created instanceof GrokProvider);
    assert.equal(created.isConfigured, true);
  });
});
Loading