Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
7 changes: 7 additions & 0 deletions src/lib/agents/search/researcher/actions/webSearch.ts
Original file line number Diff line number Diff line change
Expand Up @@ -85,6 +85,13 @@ const webSearchAction: ResearchAction<typeof actionSchema> = {
config.sources.includes('web') &&
config.classification.classification.skipSearch === false,
execute: async (input, additionalConfig) => {
// Guard against undefined or empty queries
if (!input.queries || !Array.isArray(input.queries) || input.queries.length === 0) {
return {
type: 'search_results',
results: [],
};
}
input.queries = input.queries.slice(0, 3);

const researchBlock = additionalConfig.session.getBlock(
Expand Down
3 changes: 2 additions & 1 deletion src/lib/agents/search/researcher/index.ts
Original file line number Diff line number Diff line change
Expand Up @@ -44,12 +44,13 @@ class Researcher {
},
});

const chatHistory = input.chatHistory || [];
const agentMessageHistory: Message[] = [
{
role: 'user',
content: `
<conversation>
${formatChatHistoryAsString(input.chatHistory.slice(-10))}
${formatChatHistoryAsString(chatHistory.slice(-10))}
User: ${input.followUp} (Standalone question: ${input.classification.standaloneFollowUp})
</conversation>
`,
Expand Down
14 changes: 11 additions & 3 deletions src/lib/models/providers/ollama/ollamaLLM.ts
Original file line number Diff line number Diff line change
Expand Up @@ -12,6 +12,7 @@ import { parse } from 'partial-json';
import crypto from 'crypto';
import { Message } from '@/lib/types';
import { repairJson } from '@toolsycc/json-repair';
import { stripMarkdownFences } from '@/lib/utils/parseJson';

type OllamaConfig = {
baseURL: string;
Expand Down Expand Up @@ -206,9 +207,13 @@ class OllamaLLM extends BaseLLM<OllamaConfig> {
});

try {
const content = stripMarkdownFences(response.message.content);
if (!content.trim()) {
throw new Error('Empty response from model');
}
return input.schema.parse(
JSON.parse(
repairJson(response.message.content, {
repairJson(content, {
extractJson: true,
}) as string,
),
Expand Down Expand Up @@ -248,10 +253,13 @@ class OllamaLLM extends BaseLLM<OllamaConfig> {
for await (const chunk of stream) {
recievedObj += chunk.message.content;

// Strip markdown fences if present
const cleanedObj = stripMarkdownFences(recievedObj);

try {
yield parse(recievedObj) as T;
yield parse(cleanedObj) as T;
} catch (err) {
console.log('Error parsing partial object from Ollama:', err);
// Partial JSON may not be parseable yet, yield empty object
yield {} as T;
}
}
Expand Down
133 changes: 88 additions & 45 deletions src/lib/models/providers/openai/openaiLLM.ts
Original file line number Diff line number Diff line change
@@ -1,13 +1,11 @@
import OpenAI from 'openai';
import BaseLLM from '../../base/llm';
import { zodTextFormat, zodResponseFormat } from 'openai/helpers/zod';
import {
GenerateObjectInput,
GenerateOptions,
GenerateTextInput,
GenerateTextOutput,
StreamTextOutput,
ToolCall,
} from '../../types';
import { parse } from 'partial-json';
import z from 'zod';
Expand All @@ -19,6 +17,7 @@ import {
} from 'openai/resources/index.mjs';
import { Message } from '@/lib/types';
import { repairJson } from '@toolsycc/json-repair';
import { stripMarkdownFences } from '@/lib/utils/parseJson';

type OpenAIConfig = {
apiKey: string;
Expand All @@ -44,21 +43,24 @@ class OpenAILLM extends BaseLLM<OpenAIConfig> {
if (msg.role === 'tool') {
return {
role: 'tool',
tool_call_id: msg.id,
content: msg.content,
tool_call_id: msg.id || '',
content: msg.content || '',
} as ChatCompletionToolMessageParam;
} else if (msg.role === 'assistant') {
return {
role: 'assistant',
content: msg.content,
content: msg.content || '',
...(msg.tool_calls &&
msg.tool_calls.length > 0 && {
tool_calls: msg.tool_calls?.map((tc) => ({
id: tc.id,
id: tc.id || '',
type: 'function',
function: {
name: tc.name,
arguments: JSON.stringify(tc.arguments),
name: tc.name || '',
arguments:
typeof tc.arguments === 'string'
? tc.arguments
: JSON.stringify(tc.arguments || {}),
},
})),
}),
Expand Down Expand Up @@ -164,27 +166,43 @@ class OpenAILLM extends BaseLLM<OpenAIConfig> {
for await (const chunk of stream) {
if (chunk.choices && chunk.choices.length > 0) {
const toolCalls = chunk.choices[0].delta.tool_calls;
yield {
contentChunk: chunk.choices[0].delta.content || '',
toolCallChunk:
toolCalls?.map((tc) => {
let parsedToolCalls: any[] = [];

if (toolCalls) {
for (const tc of toolCalls) {
try {
if (!recievedToolCalls[tc.index]) {
const call = {
name: tc.function?.name!,
id: tc.id!,
name: tc.function?.name || '',
id: tc.id || '',
arguments: tc.function?.arguments || '',
};
recievedToolCalls.push(call);
return { ...call, arguments: parse(call.arguments || '{}') };
const argsToParse = call.arguments || '{}';
parsedToolCalls.push({ ...call, arguments: parse(argsToParse) });
} else {
const existingCall = recievedToolCalls[tc.index];
existingCall.arguments += tc.function?.arguments || '';
return {
const argsToParse = existingCall.arguments || '{}';
parsedToolCalls.push({
Copy link
Contributor

@cubic-dev-ai cubic-dev-ai bot Mar 9, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

P2: Parse failures in streaming tool calls are now swallowed and emitted as a synthetic {} arguments tool call, which downstream consumers execute without validation. This can trigger incorrect tool execution instead of surfacing the parse error.

Prompt for AI agents
Check if this issue is valid — if so, understand the root cause and fix it. At src/lib/models/providers/openai/openaiLLM.ts, line 187:

<comment>Parse failures in streaming tool calls are now swallowed and emitted as a synthetic `{}` arguments tool call, which downstream consumers execute without validation. This can trigger incorrect tool execution instead of surfacing the parse error.</comment>

<file context>
@@ -163,27 +166,43 @@ class OpenAILLM extends BaseLLM<OpenAIConfig> {
                 existingCall.arguments += tc.function?.arguments || '';
-                return {
+                const argsToParse = existingCall.arguments || '{}';
+                parsedToolCalls.push({
                   ...existingCall,
-                  arguments: parse(existingCall.arguments),
</file context>
Fix with Cubic

...existingCall,
arguments: parse(existingCall.arguments),
};
arguments: parse(argsToParse),
});
}
}) || [],
} catch (parseErr) {
console.error('Error parsing tool call arguments:', parseErr, 'tc:', JSON.stringify(tc));
parsedToolCalls.push({
name: tc.function?.name || '',
id: tc.id || recievedToolCalls[tc.index]?.id || '',
arguments: {},
});
}
}
}

yield {
contentChunk: chunk.choices[0].delta.content || '',
toolCallChunk: parsedToolCalls,
done: chunk.choices[0].finish_reason !== null,
additionalInfo: {
finishReason: chunk.choices[0].finish_reason,
Expand All @@ -195,8 +213,18 @@ class OpenAILLM extends BaseLLM<OpenAIConfig> {
}

async generateObject<T>(input: GenerateObjectInput): Promise<T> {
const response = await this.openAIClient.chat.completions.parse({
messages: this.convertToOpenAIMessages(input.messages),
// Use chat.completions.create instead of chat.completions.parse
// for compatibility with OpenAI-compatible providers (OpenRouter, etc.)
// that don't support the /chat/completions/parse endpoint.
const response = await this.openAIClient.chat.completions.create({
messages: [
{
role: 'system',
content:
'You must respond with valid JSON only. No markdown code blocks, no explanatory text.',
},
...this.convertToOpenAIMessages(input.messages),
],
model: this.config.model,
temperature:
input.options?.temperature ?? this.config.options?.temperature ?? 1.0,
Expand All @@ -209,18 +237,27 @@ class OpenAILLM extends BaseLLM<OpenAIConfig> {
this.config.options?.frequencyPenalty,
presence_penalty:
input.options?.presencePenalty ?? this.config.options?.presencePenalty,
response_format: zodResponseFormat(input.schema, 'object'),
response_format: { type: 'json_object' },
});

if (response.choices && response.choices.length > 0) {
try {
return input.schema.parse(
JSON.parse(
repairJson(response.choices[0].message.content!, {
extractJson: true,
}) as string,
),
) as T;
const content = stripMarkdownFences(
response.choices[0].message.content || '',
);
if (!content.trim()) {
throw new Error('Empty response from model');
}
let repairedJson: string;
try {
repairedJson = repairJson(content, {
extractJson: true,
}) as string;
} catch (repairErr) {
console.error('repairJson failed on content:', content);
Copy link
Contributor

@cubic-dev-ai cubic-dev-ai bot Mar 9, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

P1: Raw model output is logged on JSON-repair failure, which can leak sensitive content into server logs.

Prompt for AI agents
Check if this issue is valid — if so, understand the root cause and fix it. At src/lib/models/providers/openai/openaiLLM.ts, line 257:

<comment>Raw model output is logged on JSON-repair failure, which can leak sensitive content into server logs.</comment>

<file context>
@@ -229,13 +248,16 @@ class OpenAILLM extends BaseLLM<OpenAIConfig> {
+            extractJson: true,
+          }) as string;
+        } catch (repairErr) {
+          console.error('repairJson failed on content:', content);
+          throw new Error(`Failed to repair JSON: ${repairErr}`);
+        }
</file context>
Suggested change
console.error('repairJson failed on content:', content);
console.error('repairJson failed', {
error: repairErr instanceof Error ? repairErr.message : String(repairErr),
contentLength: content.length,
});
Fix with Cubic

throw new Error(`Failed to repair JSON: ${repairErr}`);
}
return input.schema.parse(JSON.parse(repairedJson)) as T;
} catch (err) {
throw new Error(`Error parsing response from OpenAI: ${err}`);
}
Expand All @@ -230,11 +267,21 @@ class OpenAILLM extends BaseLLM<OpenAIConfig> {
}

async *streamObject<T>(input: GenerateObjectInput): AsyncGenerator<T> {
let recievedObj: string = '';
let receivedObj: string = '';

const stream = this.openAIClient.responses.stream({
// Use chat.completions.create with streaming instead of responses.stream
// for compatibility with OpenAI-compatible providers (OpenRouter, etc.)
// that don't support the OpenAI Responses API.
const stream = await this.openAIClient.chat.completions.create({
model: this.config.model,
input: input.messages,
messages: [
{
role: 'system',
content:
'You must respond with valid JSON only. No markdown code blocks, no explanatory text.',
},
...this.convertToOpenAIMessages(input.messages),
],
temperature:
input.options?.temperature ?? this.config.options?.temperature ?? 1.0,
top_p: input.options?.topP ?? this.config.options?.topP,
Expand All @@ -246,27 +293,23 @@ class OpenAILLM extends BaseLLM<OpenAIConfig> {
this.config.options?.frequencyPenalty,
presence_penalty:
input.options?.presencePenalty ?? this.config.options?.presencePenalty,
text: {
format: zodTextFormat(input.schema, 'object'),
},
stream: true,
});

for await (const chunk of stream) {
if (chunk.type === 'response.output_text.delta' && chunk.delta) {
recievedObj += chunk.delta;
if (chunk.choices && chunk.choices.length > 0) {
const delta = chunk.choices[0].delta.content || '';
receivedObj += delta;

// Strip markdown fences if present
const cleanedObj = stripMarkdownFences(receivedObj);

try {
yield parse(recievedObj) as T;
yield parse(cleanedObj) as T;
} catch (err) {
console.log('Error parsing partial object from OpenAI:', err);
// Partial JSON may not be parseable yet, yield empty object
yield {} as T;
}
} else if (chunk.type === 'response.output_text.done' && chunk.text) {
try {
yield parse(chunk.text) as T;
} catch (err) {
throw new Error(`Error parsing response from OpenAI: ${err}`);
}
}
}
}
Expand Down
4 changes: 2 additions & 2 deletions src/lib/searxng.ts
Original file line number Diff line number Diff line change
Expand Up @@ -41,8 +41,8 @@ export const searchSearxng = async (
const res = await fetch(url);
const data = await res.json();

const results: SearxngSearchResult[] = data.results;
const suggestions: string[] = data.suggestions;
const results: SearxngSearchResult[] = data.results || [];
const suggestions: string[] = data.suggestions || [];

return { results, suggestions };
};
43 changes: 43 additions & 0 deletions src/lib/utils/parseJson.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,43 @@
/**
* Utilities for parsing JSON from LLM responses.
*
* Many LLMs (especially when accessed via OpenAI-compatible APIs like OpenRouter,
* LiteLLM, etc.) wrap JSON responses in markdown code fences even when
* response_format is set to json_object. These utilities help handle such cases.
*/

/**
* Strip markdown code fences from a string.
* Handles both ```json and plain ``` fences.
*
* @example
* stripMarkdownFences('```json\n{"foo": "bar"}\n```') // '{"foo": "bar"}'
* stripMarkdownFences('{"foo": "bar"}') // '{"foo": "bar"}'
*/
export function stripMarkdownFences(text: string): string {
const trimmed = text.trim();
if (trimmed.startsWith('```')) {
return trimmed
.replace(/^```(?:json)?\s*/i, '')
Copy link
Contributor

@cubic-dev-ai cubic-dev-ai bot Mar 9, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

P2: Opening fence stripping only handles plain or json fences; other common language tags (js, jsonc, etc.) leave the tag in the string and cause JSON parsing to fail.

Prompt for AI agents
Check if this issue is valid — if so, understand the root cause and fix it. At src/lib/utils/parseJson.ts, line 21:

<comment>Opening fence stripping only handles plain or `json` fences; other common language tags (```js, ```jsonc, etc.) leave the tag in the string and cause JSON parsing to fail.</comment>

<file context>
@@ -0,0 +1,43 @@
+  const trimmed = text.trim();
+  if (trimmed.startsWith('```')) {
+    return trimmed
+      .replace(/^```(?:json)?\s*/i, '')
+      .replace(/```\s*$/, '')
+      .trim();
</file context>
Fix with Cubic

.replace(/```\s*$/, '')
.trim();
}
return trimmed;
}

/**
 * Parse JSON from model output, tolerating surrounding markdown fences.
 * Never throws: any failure (fencing, malformed JSON, non-string input)
 * yields undefined so callers can fall back gracefully.
 *
 * @param text - Raw model output, possibly fence-wrapped.
 * @returns The parsed value, or undefined when parsing fails.
 *
 * @example
 * safeParseJson('```json\n{"foo": "bar"}\n```') // { foo: 'bar' }
 * safeParseJson('invalid') // undefined
 */
export function safeParseJson<T = unknown>(text: string): T | undefined {
  try {
    return JSON.parse(stripMarkdownFences(text)) as T;
  } catch {
    return undefined;
  }
}