diff --git a/src/lib/agents/search/researcher/actions/webSearch.ts b/src/lib/agents/search/researcher/actions/webSearch.ts index 4d60b79f2..69f38582a 100644 --- a/src/lib/agents/search/researcher/actions/webSearch.ts +++ b/src/lib/agents/search/researcher/actions/webSearch.ts @@ -85,6 +85,13 @@ const webSearchAction: ResearchAction = { config.sources.includes('web') && config.classification.classification.skipSearch === false, execute: async (input, additionalConfig) => { + // Guard against undefined or empty queries + if (!input.queries || !Array.isArray(input.queries) || input.queries.length === 0) { + return { + type: 'search_results', + results: [], + }; + } input.queries = input.queries.slice(0, 3); const researchBlock = additionalConfig.session.getBlock( diff --git a/src/lib/agents/search/researcher/index.ts b/src/lib/agents/search/researcher/index.ts index d6532819e..65db1bd73 100644 --- a/src/lib/agents/search/researcher/index.ts +++ b/src/lib/agents/search/researcher/index.ts @@ -44,12 +44,13 @@ class Researcher { }, }); + const chatHistory = input.chatHistory || []; const agentMessageHistory: Message[] = [ { role: 'user', content: ` - ${formatChatHistoryAsString(input.chatHistory.slice(-10))} + ${formatChatHistoryAsString(chatHistory.slice(-10))} User: ${input.followUp} (Standalone question: ${input.classification.standaloneFollowUp}) `, diff --git a/src/lib/models/providers/ollama/ollamaLLM.ts b/src/lib/models/providers/ollama/ollamaLLM.ts index 3bcd3ccf1..29c0ce403 100644 --- a/src/lib/models/providers/ollama/ollamaLLM.ts +++ b/src/lib/models/providers/ollama/ollamaLLM.ts @@ -12,6 +12,7 @@ import { parse } from 'partial-json'; import crypto from 'crypto'; import { Message } from '@/lib/types'; import { repairJson } from '@toolsycc/json-repair'; +import { stripMarkdownFences } from '@/lib/utils/parseJson'; type OllamaConfig = { baseURL: string; @@ -206,9 +207,13 @@ class OllamaLLM extends BaseLLM { }); try { + const content = 
stripMarkdownFences(response.message.content); + if (!content.trim()) { + throw new Error('Empty response from model'); + } return input.schema.parse( JSON.parse( - repairJson(response.message.content, { + repairJson(content, { extractJson: true, }) as string, ), @@ -248,10 +253,13 @@ class OllamaLLM extends BaseLLM { for await (const chunk of stream) { recievedObj += chunk.message.content; + // Strip markdown fences if present + const cleanedObj = stripMarkdownFences(recievedObj); + try { - yield parse(recievedObj) as T; + yield parse(cleanedObj) as T; } catch (err) { - console.log('Error parsing partial object from Ollama:', err); + // Partial JSON may not be parseable yet, yield empty object yield {} as T; } } diff --git a/src/lib/models/providers/openai/openaiLLM.ts b/src/lib/models/providers/openai/openaiLLM.ts index 5ae1538a0..858ccb0c5 100644 --- a/src/lib/models/providers/openai/openaiLLM.ts +++ b/src/lib/models/providers/openai/openaiLLM.ts @@ -1,13 +1,11 @@ import OpenAI from 'openai'; import BaseLLM from '../../base/llm'; -import { zodTextFormat, zodResponseFormat } from 'openai/helpers/zod'; import { GenerateObjectInput, GenerateOptions, GenerateTextInput, GenerateTextOutput, StreamTextOutput, - ToolCall, } from '../../types'; import { parse } from 'partial-json'; import z from 'zod'; @@ -19,6 +17,7 @@ import { } from 'openai/resources/index.mjs'; import { Message } from '@/lib/types'; import { repairJson } from '@toolsycc/json-repair'; +import { stripMarkdownFences } from '@/lib/utils/parseJson'; type OpenAIConfig = { apiKey: string; @@ -44,21 +43,24 @@ class OpenAILLM extends BaseLLM { if (msg.role === 'tool') { return { role: 'tool', - tool_call_id: msg.id, - content: msg.content, + tool_call_id: msg.id || '', + content: msg.content || '', } as ChatCompletionToolMessageParam; } else if (msg.role === 'assistant') { return { role: 'assistant', - content: msg.content, + content: msg.content || '', ...(msg.tool_calls && msg.tool_calls.length > 0 && { 
tool_calls: msg.tool_calls?.map((tc) => ({ - id: tc.id, + id: tc.id || '', type: 'function', function: { - name: tc.name, - arguments: JSON.stringify(tc.arguments), + name: tc.name || '', + arguments: + typeof tc.arguments === 'string' + ? tc.arguments + : JSON.stringify(tc.arguments || {}), }, })), }), @@ -164,27 +166,43 @@ class OpenAILLM extends BaseLLM { for await (const chunk of stream) { if (chunk.choices && chunk.choices.length > 0) { const toolCalls = chunk.choices[0].delta.tool_calls; - yield { - contentChunk: chunk.choices[0].delta.content || '', - toolCallChunk: - toolCalls?.map((tc) => { + let parsedToolCalls: any[] = []; + + if (toolCalls) { + for (const tc of toolCalls) { + try { if (!recievedToolCalls[tc.index]) { const call = { - name: tc.function?.name!, - id: tc.id!, + name: tc.function?.name || '', + id: tc.id || '', arguments: tc.function?.arguments || '', }; recievedToolCalls.push(call); - return { ...call, arguments: parse(call.arguments || '{}') }; + const argsToParse = call.arguments || '{}'; + parsedToolCalls.push({ ...call, arguments: parse(argsToParse) }); } else { const existingCall = recievedToolCalls[tc.index]; existingCall.arguments += tc.function?.arguments || ''; - return { + const argsToParse = existingCall.arguments || '{}'; + parsedToolCalls.push({ ...existingCall, - arguments: parse(existingCall.arguments), - }; + arguments: parse(argsToParse), + }); } - }) || [], + } catch (parseErr) { + console.error('Error parsing tool call arguments:', parseErr, 'tc:', JSON.stringify(tc)); + parsedToolCalls.push({ + name: tc.function?.name || '', + id: tc.id || recievedToolCalls[tc.index]?.id || '', + arguments: {}, + }); + } + } + } + + yield { + contentChunk: chunk.choices[0].delta.content || '', + toolCallChunk: parsedToolCalls, done: chunk.choices[0].finish_reason !== null, additionalInfo: { finishReason: chunk.choices[0].finish_reason, @@ -195,8 +213,18 @@ class OpenAILLM extends BaseLLM { } async generateObject(input: 
GenerateObjectInput): Promise { - const response = await this.openAIClient.chat.completions.parse({ - messages: this.convertToOpenAIMessages(input.messages), + // Use chat.completions.create instead of chat.completions.parse + // for compatibility with OpenAI-compatible providers (OpenRouter, etc.) + // that don't support the /chat/completions/parse endpoint. + const response = await this.openAIClient.chat.completions.create({ + messages: [ + { + role: 'system', + content: + 'You must respond with valid JSON only. No markdown code blocks, no explanatory text.', + }, + ...this.convertToOpenAIMessages(input.messages), + ], model: this.config.model, temperature: input.options?.temperature ?? this.config.options?.temperature ?? 1.0, @@ -209,18 +237,27 @@ class OpenAILLM extends BaseLLM { this.config.options?.frequencyPenalty, presence_penalty: input.options?.presencePenalty ?? this.config.options?.presencePenalty, - response_format: zodResponseFormat(input.schema, 'object'), + response_format: { type: 'json_object' }, }); if (response.choices && response.choices.length > 0) { try { - return input.schema.parse( - JSON.parse( - repairJson(response.choices[0].message.content!, { - extractJson: true, - }) as string, - ), - ) as T; + const content = stripMarkdownFences( + response.choices[0].message.content || '', + ); + if (!content.trim()) { + throw new Error('Empty response from model'); + } + let repairedJson: string; + try { + repairedJson = repairJson(content, { + extractJson: true, + }) as string; + } catch (repairErr) { + console.error('repairJson failed on content:', content); + throw new Error(`Failed to repair JSON: ${repairErr}`); + } + return input.schema.parse(JSON.parse(repairedJson)) as T; } catch (err) { throw new Error(`Error parsing response from OpenAI: ${err}`); } @@ -230,11 +267,21 @@ class OpenAILLM extends BaseLLM { } async *streamObject(input: GenerateObjectInput): AsyncGenerator { - let recievedObj: string = ''; + let receivedObj: string = ''; - 
const stream = this.openAIClient.responses.stream({ + // Use chat.completions.create with streaming instead of responses.stream + // for compatibility with OpenAI-compatible providers (OpenRouter, etc.) + // that don't support the OpenAI Responses API. + const stream = await this.openAIClient.chat.completions.create({ model: this.config.model, - input: input.messages, + messages: [ + { + role: 'system', + content: + 'You must respond with valid JSON only. No markdown code blocks, no explanatory text.', + }, + ...this.convertToOpenAIMessages(input.messages), + ], temperature: input.options?.temperature ?? this.config.options?.temperature ?? 1.0, top_p: input.options?.topP ?? this.config.options?.topP, @@ -246,27 +293,23 @@ class OpenAILLM extends BaseLLM { this.config.options?.frequencyPenalty, presence_penalty: input.options?.presencePenalty ?? this.config.options?.presencePenalty, - text: { - format: zodTextFormat(input.schema, 'object'), - }, + stream: true, }); for await (const chunk of stream) { - if (chunk.type === 'response.output_text.delta' && chunk.delta) { - recievedObj += chunk.delta; + if (chunk.choices && chunk.choices.length > 0) { + const delta = chunk.choices[0].delta.content || ''; + receivedObj += delta; + + // Strip markdown fences if present + const cleanedObj = stripMarkdownFences(receivedObj); try { - yield parse(recievedObj) as T; + yield parse(cleanedObj) as T; } catch (err) { - console.log('Error parsing partial object from OpenAI:', err); + // Partial JSON may not be parseable yet, yield empty object yield {} as T; } - } else if (chunk.type === 'response.output_text.done' && chunk.text) { - try { - yield parse(chunk.text) as T; - } catch (err) { - throw new Error(`Error parsing response from OpenAI: ${err}`); - } } } } diff --git a/src/lib/searxng.ts b/src/lib/searxng.ts index a0bcd8355..3cc383189 100644 --- a/src/lib/searxng.ts +++ b/src/lib/searxng.ts @@ -41,8 +41,8 @@ export const searchSearxng = async ( const res = await fetch(url); 
const data = await res.json(); - const results: SearxngSearchResult[] = data.results; - const suggestions: string[] = data.suggestions; + const results: SearxngSearchResult[] = data.results || []; + const suggestions: string[] = data.suggestions || []; return { results, suggestions }; }; diff --git a/src/lib/utils/parseJson.ts b/src/lib/utils/parseJson.ts new file mode 100644 index 000000000..8346481ef --- /dev/null +++ b/src/lib/utils/parseJson.ts @@ -0,0 +1,43 @@ +/** + * Utilities for parsing JSON from LLM responses. + * + * Many LLMs (especially when accessed via OpenAI-compatible APIs like OpenRouter, + * LiteLLM, etc.) wrap JSON responses in markdown code fences even when + * response_format is set to json_object. These utilities help handle such cases. + */ + +/** + * Strip markdown code fences from a string. + * Handles both ```json and plain ``` fences. + * + * @example + * stripMarkdownFences('```json\n{"foo": "bar"}\n```') // '{"foo": "bar"}' + * stripMarkdownFences('{"foo": "bar"}') // '{"foo": "bar"}' + */ +export function stripMarkdownFences(text: string): string { + const trimmed = text.trim(); + if (trimmed.startsWith('```')) { + return trimmed + .replace(/^```(?:json)?\s*/i, '') + .replace(/```\s*$/, '') + .trim(); + } + return trimmed; +} + +/** + * Safely parse JSON, stripping markdown fences first. + * Returns undefined if parsing fails instead of throwing. + * + * @example + * safeParseJson('```json\n{"foo": "bar"}\n```') // { foo: 'bar' } + * safeParseJson('invalid') // undefined + */ +export function safeParseJson<T>(text: string): T | undefined { + try { + const cleaned = stripMarkdownFences(text); + return JSON.parse(cleaned) as T; + } catch { + return undefined; + } }