diff --git a/.env.example b/.env.example index 627883a..674e882 100644 --- a/.env.example +++ b/.env.example @@ -33,12 +33,12 @@ REFRESH_INTERVAL_MINUTES=15 # === LLM Layer (optional) === # Enables AI-enhanced trade ideas and breaking news Telegram alerts. -# Provider options: anthropic | openai | gemini | codex | openrouter | minimax | mistral | ollama +# Provider options: anthropic | openai | gemini | codex | openrouter | minimax | mistral | ollama | grok LLM_PROVIDER= # Not needed for codex (uses ~/.codex/auth.json) or ollama (local) LLM_API_KEY= # Optional override. Each provider has a sensible default: -# anthropic: claude-sonnet-4-6 | openai: gpt-5.4 | gemini: gemini-3.1-pro | codex: gpt-5.3-codex | openrouter: openrouter/auto | minimax: MiniMax-M2.5 | ollama: llama3.1:8b +# anthropic: claude-sonnet-4-6 | openai: gpt-5.4 | gemini: gemini-3.1-pro | codex: gpt-5.3-codex | openrouter: openrouter/auto | minimax: MiniMax-M2.5 | mistral: mistral-large-latest | ollama: llama3.1:8b | grok: grok-4-latest LLM_MODEL= # Ollama base URL (only needed if not using default http://localhost:11434) OLLAMA_BASE_URL= diff --git a/README.md b/README.md index 29089eb..a05f962 100644 --- a/README.md +++ b/README.md @@ -186,10 +186,10 @@ Alerts are delivered as rich embeds with color-coded sidebars: red for FLASH, ye **Optional dependency:** The full bot requires `discord.js`. Install it with `npm install discord.js`. If it's not installed, Crucix automatically falls back to webhook-only mode. 
### Optional LLM Layer -Connect any of 6 LLM providers for enhanced analysis: +Connect any of 8 LLM providers for enhanced analysis: - **AI trade ideas** — quantitative analyst producing 5-8 actionable ideas citing specific data - **Smarter alert evaluation** — LLM classifies signals into FLASH/PRIORITY/ROUTINE tiers with cross-domain correlation and confidence scoring -- Providers: Anthropic Claude, OpenAI, Google Gemini, OpenRouter (Unified API), OpenAI Codex (ChatGPT subscription), MiniMax, Mistral +- Providers: Anthropic Claude, OpenAI, Google Gemini, OpenRouter (Unified API), OpenAI Codex (ChatGPT subscription), MiniMax, Mistral, Grok - Graceful fallback — when LLM is unavailable, a rule-based engine takes over alert evaluation. LLM failures never crash the sweep cycle. --- @@ -222,7 +222,7 @@ These three unlock the most valuable economic and satellite data. Each takes abo ### LLM Provider (optional, for AI-enhanced ideas) -Set `LLM_PROVIDER` to one of: `anthropic`, `openai`, `gemini`, `codex`, `openrouter`, `minimax`, `mistral` +Set `LLM_PROVIDER` to one of: `anthropic`, `openai`, `gemini`, `codex`, `openrouter`, `minimax`, `mistral`, `grok` | Provider | Key Required | Default Model | |----------|-------------|---------------| @@ -233,6 +233,7 @@ Set `LLM_PROVIDER` to one of: `anthropic`, `openai`, `gemini`, `codex`, `openrou | `codex` | None (uses `~/.codex/auth.json`) | gpt-5.3-codex | | `minimax` | `LLM_API_KEY` | MiniMax-M2.5 | | `mistral` | `LLM_API_KEY` | mistral-large-latest | +| `grok` | `LLM_API_KEY` | grok-4-latest | For Codex, run `npx @openai/codex login` to authenticate via your ChatGPT subscription. 
@@ -302,11 +303,12 @@ crucix/ │ └── jarvis.html # Self-contained Jarvis HUD │ ├── lib/ -│ ├── llm/ # LLM abstraction (5 providers, raw fetch, no SDKs) +│ ├── llm/ # LLM abstraction (8 providers, raw fetch, no SDKs) │ │ ├── provider.mjs # Base class │ │ ├── anthropic.mjs # Claude │ │ ├── openai.mjs # GPT │ │ ├── gemini.mjs # Gemini +│ │ ├── grok.mjs # Grok │ │ ├── openrouter.mjs # OpenRouter (Unified API) │ │ ├── codex.mjs # Codex (ChatGPT subscription) │ │ ├── minimax.mjs # MiniMax (M2.5, 204K context) @@ -412,7 +414,7 @@ All settings are in `.env` with sensible defaults: |----------|---------|-------------| | `PORT` | `3117` | Dashboard server port | | `REFRESH_INTERVAL_MINUTES` | `15` | Auto-refresh interval | -| `LLM_PROVIDER` | disabled | `anthropic`, `openai`, `gemini`, `codex`, `openrouter`, `minimax`, or `mistral` | +| `LLM_PROVIDER` | disabled | `anthropic`, `openai`, `gemini`, `codex`, `openrouter`, `minimax`, `mistral`, or `grok` | | `LLM_API_KEY` | — | API key (not needed for codex) | | `LLM_MODEL` | per-provider default | Override model selection | | `TELEGRAM_BOT_TOKEN` | disabled | For Telegram alerts + bot commands | diff --git a/crucix.config.mjs b/crucix.config.mjs index a6c9ccd..887d760 100644 --- a/crucix.config.mjs +++ b/crucix.config.mjs @@ -7,7 +7,7 @@ export default { refreshIntervalMinutes: parseInt(process.env.REFRESH_INTERVAL_MINUTES) || 15, llm: { - provider: process.env.LLM_PROVIDER || null, // anthropic | openai | gemini | codex | openrouter | minimax | mistral | ollama + provider: process.env.LLM_PROVIDER || null, // anthropic | openai | gemini | codex | openrouter | minimax | mistral | ollama | grok apiKey: process.env.LLM_API_KEY || null, model: process.env.LLM_MODEL || null, baseUrl: process.env.OLLAMA_BASE_URL || null, diff --git a/lib/llm/grok.mjs b/lib/llm/grok.mjs new file mode 100644 index 0000000..76fef66 --- /dev/null +++ b/lib/llm/grok.mjs @@ -0,0 +1,54 @@ +// Grok Provider - raw fetch, no SDK + +import { LLMProvider } from 
'./provider.mjs'; + +export class GrokProvider extends LLMProvider { + constructor(config) { + super(config); + this.name = 'grok'; + this.apiKey = config.apiKey; + this.model = config.model || 'grok-4-latest'; + } + + get isConfigured() { + return !!this.apiKey; + } + + async complete(systemPrompt, userMessage, opts = {}) { + const res = await fetch('https://api.x.ai/v1/chat/completions', { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + Authorization: `Bearer ${this.apiKey}`, + }, + body: JSON.stringify({ + max_tokens: opts.maxTokens || 4096, + messages: [ + { role: 'system', content: systemPrompt }, + { role: 'user', content: userMessage }, + ], + model: this.model, + stream: false, + temperature: 0, + }), + signal: AbortSignal.timeout(opts.timeout || 60000), + }); + + if (!res.ok) { + const err = await res.text().catch(() => ''); + throw new Error(`Grok API ${res.status}: ${err.substring(0, 200)}`); + } + + const data = await res.json(); + const text = data.choices?.[0]?.message?.content || ''; + + return { + text, + usage: { + inputTokens: data.usage?.prompt_tokens || 0, + outputTokens: data.usage?.completion_tokens || 0, + }, + model: data.model || this.model, + }; + } +} diff --git a/lib/llm/index.mjs b/lib/llm/index.mjs index 0c0beb9..21fd64a 100644 --- a/lib/llm/index.mjs +++ b/lib/llm/index.mjs @@ -8,6 +8,7 @@ import { CodexProvider } from "./codex.mjs"; import { MiniMaxProvider } from "./minimax.mjs"; import { MistralProvider } from "./mistral.mjs"; import { OllamaProvider } from "./ollama.mjs"; +import { GrokProvider } from "./grok.mjs"; export { LLMProvider } from "./provider.mjs"; export { AnthropicProvider } from "./anthropic.mjs"; @@ -18,6 +19,7 @@ export { CodexProvider } from "./codex.mjs"; export { MiniMaxProvider } from "./minimax.mjs"; export { MistralProvider } from "./mistral.mjs"; export { OllamaProvider } from "./ollama.mjs"; +export { GrokProvider } from "./grok.mjs"; /** * Create an LLM provider based on config. 
@@ -46,6 +48,8 @@ export function createLLMProvider(llmConfig) { return new MistralProvider({ apiKey, model }); case "ollama": return new OllamaProvider({ model, baseUrl: llmConfig.baseUrl }); + case "grok": + return new GrokProvider({ apiKey, model }); default: console.warn( `[LLM] Unknown provider "${provider}". LLM features disabled.`, diff --git a/test/llm-grok.test.mjs b/test/llm-grok.test.mjs new file mode 100644 index 0000000..1d400fe --- /dev/null +++ b/test/llm-grok.test.mjs @@ -0,0 +1,133 @@ +// Grok provider — unit tests +// Uses Node.js built-in test runner (node:test) — no extra dependencies + +import { describe, it, mock } from 'node:test'; +import assert from 'node:assert/strict'; +import { GrokProvider } from '../lib/llm/grok.mjs'; +import { createLLMProvider } from '../lib/llm/index.mjs'; + +// ─── Unit Tests ─── + +describe('GrokProvider', () => { + it('should set defaults correctly', () => { + const provider = new GrokProvider({ apiKey: 'sk-test' }); + assert.equal(provider.name, 'grok'); + assert.equal(provider.model, 'grok-4-latest'); + assert.equal(provider.isConfigured, true); + }); + + it('should accept custom model', () => { + const provider = new GrokProvider({ apiKey: 'sk-test', model: 'grok-2' }); + assert.equal(provider.model, 'grok-2'); + }); + + it('should report not configured without API key', () => { + const provider = new GrokProvider({}); + assert.equal(provider.isConfigured, false); + }); + + it('should throw on API error', async () => { + const provider = new GrokProvider({ apiKey: 'sk-test' }); + const originalFetch = globalThis.fetch; + globalThis.fetch = mock.fn(() => + Promise.resolve({ ok: false, status: 401, text: () => Promise.resolve('Unauthorized') }) + ); + try { + await assert.rejects( + () => provider.complete('system', 'user'), + (err) => { + assert.match(err.message, /Grok API 401/); + return true; + } + ); + } finally { + globalThis.fetch = originalFetch; + } + }); + + it('should parse successful response', async
() => { + const provider = new GrokProvider({ apiKey: 'sk-test' }); + const mockResponse = { + choices: [{ message: { content: 'Hello world' } }], + usage: { prompt_tokens: 10, completion_tokens: 5 }, + model: 'grok-3' + }; + const originalFetch = globalThis.fetch; + globalThis.fetch = mock.fn(() => + Promise.resolve({ ok: true, json: () => Promise.resolve(mockResponse) }) + ); + try { + const result = await provider.complete('system', 'user'); + assert.equal(result.text, 'Hello world'); + assert.equal(result.usage.inputTokens, 10); + assert.equal(result.usage.outputTokens, 5); + assert.equal(result.model, 'grok-3'); + } finally { + globalThis.fetch = originalFetch; + } + }); + + it('should send correct request format', async () => { + const provider = new GrokProvider({ apiKey: 'sk-test-key', model: 'grok-4-latest' }); + let capturedUrl, capturedOpts; + const originalFetch = globalThis.fetch; + globalThis.fetch = mock.fn((url, opts) => { + capturedUrl = url; + capturedOpts = opts; + return Promise.resolve({ + ok: true, + json: () => Promise.resolve({ + choices: [{ message: { content: 'ok' } }], + usage: { prompt_tokens: 1, completion_tokens: 1 }, + model: 'grok-4-latest', + }), + }); + }); + try { + await provider.complete('system prompt', 'user message', { maxTokens: 2048 }); + assert.equal(capturedUrl, 'https://api.x.ai/v1/chat/completions'); + assert.equal(capturedOpts.method, 'POST'); + const headers = capturedOpts.headers; + assert.equal(headers['Content-Type'], 'application/json'); + assert.equal(headers.Authorization, 'Bearer sk-test-key'); + const body = JSON.parse(capturedOpts.body); + assert.equal(body.model, 'grok-4-latest'); + assert.equal(body.max_tokens, 2048); + assert.equal(body.messages[0].role, 'system'); + assert.equal(body.messages[0].content, 'system prompt'); + assert.equal(body.messages[1].role, 'user'); + assert.equal(body.messages[1].content, 'user message'); + } finally { + globalThis.fetch = originalFetch; + } + }); + + it('should handle 
empty response gracefully', async () => { + const provider = new GrokProvider({ apiKey: 'sk-test' }); + const originalFetch = globalThis.fetch; + globalThis.fetch = mock.fn(() => + Promise.resolve({ + ok: true, + json: () => Promise.resolve({ choices: [], usage: {} }), + }) + ); + try { + const result = await provider.complete('sys', 'user'); + assert.equal(result.text, ''); + assert.equal(result.usage.inputTokens, 0); + assert.equal(result.usage.outputTokens, 0); + } finally { + globalThis.fetch = originalFetch; + } + }); +}); + +// ─── Factory Tests ─── + +describe('createLLMProvider', () => { + it('should create Grok provider', () => { + const provider = createLLMProvider({ provider: 'grok', apiKey: 'sk-test' }); + assert.ok(provider instanceof GrokProvider); + assert.equal(provider.isConfigured, true); + }); +});