diff --git a/packages/types/src/provider-settings.ts b/packages/types/src/provider-settings.ts
index bfb7e17b009..63920bad07b 100644
--- a/packages/types/src/provider-settings.ts
+++ b/packages/types/src/provider-settings.ts
@@ -55,6 +55,7 @@ export const dynamicProviders = [
 	"inception",
 	"synthetic",
 	"sap-ai-core",
+	"openai", // OpenAI Compatible - fetches models dynamically from custom baseUrl
 	// kilocode_change end
 	"deepinfra",
 	"io-intelligence",
@@ -101,9 +102,11 @@ export const isInternalProvider = (key: string): key is InternalProvider =>
  * CustomProvider
  *
  * Custom providers are completely configurable within Roo Code settings.
+ * Note: "openai" was moved to dynamicProviders as it fetches models dynamically.
  */
-
-export const customProviders = ["openai"] as const
+// kilocode_change start
+export const customProviders = [] as const
+// kilocode_change end
 
 export type CustomProvider = (typeof customProviders)[number]
 
@@ -706,6 +709,7 @@ export const modelIdKeysByProvider: Record<ProviderName, string> = {
 	kilocode: "kilocodeModel",
 	bedrock: "apiModelId",
 	vertex: "apiModelId",
+	openai: "openAiModelId", // kilocode_change - OpenAI Compatible provider
 	"openai-native": "openAiModelId",
 	ollama: "ollamaModelId",
 	lmstudio: "lmStudioModelId",
diff --git a/src/api/providers/fetchers/__tests__/openai.spec.ts b/src/api/providers/fetchers/__tests__/openai.spec.ts
new file mode 100644
index 00000000000..9a8a89121c4
--- /dev/null
+++ b/src/api/providers/fetchers/__tests__/openai.spec.ts
@@ -0,0 +1,562 @@
+// kilocode_change - new file
+// Mocks must come first, before imports
+vi.mock("axios")
+
+import type { Mock } from "vitest"
+import axios from "axios"
+import { getOpenAiModels } from "../openai"
+
+const mockedAxios = axios as typeof axios & {
+	get: Mock
+	isAxiosError: Mock
+}
+
+describe("getOpenAiModels", () => {
+	beforeEach(() => {
+		vi.clearAllMocks()
+	})
+
+	it("returns empty object when no baseUrl is provided", async () => {
+		const result = await getOpenAiModels({})
+
+		expect(mockedAxios.get).not.toHaveBeenCalled()
+		expect(result).toEqual({})
+	})
+
+	it("returns empty object when baseUrl is undefined", async () => {
+		const result = await getOpenAiModels({ baseUrl: undefined })
+
+		expect(mockedAxios.get).not.toHaveBeenCalled()
+		expect(result).toEqual({})
+	})
+
+	it("successfully fetches and formats OpenAI Compatible models", async () => {
+		const mockResponse = {
+			data: {
+				data: [
+					{
+						id: "gpt-4-turbo",
+						object: "model",
+						created: 1699000000,
+						owned_by: "openai",
+					},
+					{
+						id: "llama-3.1-70b",
+						object: "model",
+						created: 1699000001,
+						owned_by: "meta",
+					},
+				],
+				object: "list",
+			},
+		}
+
+		mockedAxios.get.mockResolvedValue(mockResponse)
+
+		const result = await getOpenAiModels({
+			baseUrl: "http://localhost:8080/v1",
+			apiKey: "test-api-key",
+		})
+
+		expect(mockedAxios.get).toHaveBeenCalledWith("http://localhost:8080/v1/models", {
+			headers: {
+				"Content-Type": "application/json",
+				Authorization: "Bearer test-api-key",
+			},
+			timeout: 10_000,
+		})
+
+		expect(result).toEqual({
+			"gpt-4-turbo": {
+				maxTokens: 8192,
+				contextWindow: 32000,
+				supportsImages: false,
+				supportsPromptCache: false,
+				supportsComputerUse: false,
+				description: "gpt-4-turbo",
+				displayName: "gpt-4-turbo",
+				supportsReasoningEffort: false,
+				supportsReasoningBudget: false,
+				supportsTemperature: true,
+				supportsNativeTools: true,
+				defaultToolProtocol: "native",
+			},
+			"llama-3.1-70b": {
+				maxTokens: 8192,
+				contextWindow: 32000,
+				supportsImages: false,
+				supportsPromptCache: false,
+				supportsComputerUse: false,
+				description: "llama-3.1-70b",
+				displayName: "llama-3.1-70b",
+				supportsReasoningEffort: false,
+				supportsReasoningBudget: false,
+				supportsTemperature: true,
+				supportsNativeTools: true,
+				defaultToolProtocol: "native",
+			},
+		})
+	})
+
+	it("handles base URLs with trailing slashes correctly", async () => {
+		const mockResponse = {
+			data: {
+				data: [],
+			},
+		}
+
+		mockedAxios.get.mockResolvedValue(mockResponse)
+
+		await getOpenAiModels({
+			baseUrl: "http://localhost:8080/v1/",
+			apiKey: "test-api-key",
+		})
+
+		expect(mockedAxios.get).toHaveBeenCalledWith("http://localhost:8080/v1/models", {
+			headers: {
+				"Content-Type": "application/json",
+				Authorization: "Bearer test-api-key",
+			},
+			timeout: 10_000,
+		})
+	})
+
+	it("handles base URLs with multiple trailing slashes correctly", async () => {
+		const mockResponse = {
+			data: {
+				data: [],
+			},
+		}
+
+		mockedAxios.get.mockResolvedValue(mockResponse)
+
+		await getOpenAiModels({
+			baseUrl: "http://localhost:8080/v1///",
+			apiKey: "test-api-key",
+		})
+
+		expect(mockedAxios.get).toHaveBeenCalledWith("http://localhost:8080/v1/models", {
+			headers: {
+				"Content-Type": "application/json",
+				Authorization: "Bearer test-api-key",
+			},
+			timeout: 10_000,
+		})
+	})
+
+	it("makes request without authorization header when no API key provided", async () => {
+		const mockResponse = {
+			data: {
+				data: [],
+			},
+		}
+
+		mockedAxios.get.mockResolvedValue(mockResponse)
+
+		await getOpenAiModels({
+			baseUrl: "http://localhost:8080/v1",
+		})
+
+		expect(mockedAxios.get).toHaveBeenCalledWith("http://localhost:8080/v1/models", {
+			headers: {
+				"Content-Type": "application/json",
+			},
+			timeout: 10_000,
+		})
+	})
+
+	it("includes custom headers in request", async () => {
+		const mockResponse = {
+			data: {
+				data: [],
+			},
+		}
+
+		mockedAxios.get.mockResolvedValue(mockResponse)
+
+		await getOpenAiModels({
+			baseUrl: "http://localhost:8080/v1",
+			apiKey: "test-api-key",
+			headers: {
+				"X-Custom-Header": "custom-value",
+				"X-Another-Header": "another-value",
+			},
+		})
+
+		expect(mockedAxios.get).toHaveBeenCalledWith("http://localhost:8080/v1/models", {
+			headers: {
+				"Content-Type": "application/json",
+				"X-Custom-Header": "custom-value",
+				"X-Another-Header": "another-value",
+				Authorization: "Bearer test-api-key",
+			},
+			timeout: 10_000,
+		})
+	})
+
+	it("returns empty object when data array is empty", async () => {
+		const mockResponse = {
+			data: {
+				data: [],
+			},
+		}
+
+		mockedAxios.get.mockResolvedValue(mockResponse)
+
+		const result = await getOpenAiModels({
+			baseUrl: "http://localhost:8080/v1",
+			apiKey: "test-api-key",
+		})
+
+		expect(result).toEqual({})
+	})
+
+	it("throws error for invalid response format", async () => {
+		const mockResponse = {
+			data: {
+				// Missing 'data' field
+				models: [],
+			},
+		}
+
+		mockedAxios.get.mockResolvedValue(mockResponse)
+
+		await expect(
+			getOpenAiModels({
+				baseUrl: "http://localhost:8080/v1",
+				apiKey: "test-api-key",
+			}),
+		).rejects.toThrow("OpenAI Compatible API returned invalid response format")
+	})
+
+	it("throws error for timeout", async () => {
+		const axiosError = {
+			code: "ECONNABORTED",
+			isAxiosError: true,
+		}
+
+		mockedAxios.isAxiosError.mockReturnValue(true)
+		mockedAxios.get.mockRejectedValue(axiosError)
+
+		await expect(
+			getOpenAiModels({
+				baseUrl: "http://localhost:8080/v1",
+				apiKey: "test-api-key",
+			}),
+		).rejects.toThrow("Failed to fetch OpenAI Compatible models from http://localhost:8080/v1: Request timeout")
+	})
+
+	it("throws detailed error for HTTP error responses", async () => {
+		const axiosError = {
+			response: {
+				status: 401,
+				statusText: "Unauthorized",
+			},
+			isAxiosError: true,
+		}
+
+		mockedAxios.isAxiosError.mockReturnValue(true)
+		mockedAxios.get.mockRejectedValue(axiosError)
+
+		await expect(
+			getOpenAiModels({
+				baseUrl: "http://localhost:8080/v1",
+				apiKey: "invalid-key",
+			}),
+		).rejects.toThrow("Failed to fetch OpenAI Compatible models from http://localhost:8080/v1: 401 Unauthorized")
+	})
+
+	it("throws network error for request failures", async () => {
+		const axiosError = {
+			request: {},
+			isAxiosError: true,
+		}
+
+		mockedAxios.isAxiosError.mockReturnValue(true)
+		mockedAxios.get.mockRejectedValue(axiosError)
+
+		await expect(
+			getOpenAiModels({
+				baseUrl: "http://invalid-url/v1",
+				apiKey: "test-api-key",
+			}),
+		).rejects.toThrow("Failed to fetch OpenAI Compatible models from http://invalid-url/v1: No response")
+	})
+
+	it("throws generic error for other failures", async () => {
+		const genericError = new Error("Network timeout")
+
+		mockedAxios.isAxiosError.mockReturnValue(false)
+		mockedAxios.get.mockRejectedValue(genericError)
+
+		await expect(
+			getOpenAiModels({
+				baseUrl: "http://localhost:8080/v1",
+				apiKey: "test-api-key",
+			}),
+		).rejects.toThrow("Failed to fetch OpenAI Compatible models from http://localhost:8080/v1: Network timeout")
+	})
+
+	it("handles models with minimal fields", async () => {
+		const mockResponse = {
+			data: {
+				data: [
+					{
+						id: "minimal-model",
+						// Only id is required by schema
+					},
+				],
+			},
+		}
+
+		mockedAxios.get.mockResolvedValue(mockResponse)
+
+		const result = await getOpenAiModels({
+			baseUrl: "http://localhost:8080/v1",
+			apiKey: "test-api-key",
+		})
+
+		expect(result).toEqual({
+			"minimal-model": {
+				maxTokens: 8192,
+				contextWindow: 32000,
+				supportsImages: false,
+				supportsPromptCache: false,
+				supportsComputerUse: false,
+				description: "minimal-model",
+				displayName: "minimal-model",
+				supportsReasoningEffort: false,
+				supportsReasoningBudget: false,
+				supportsTemperature: true,
+				supportsNativeTools: true,
+				defaultToolProtocol: "native",
+			},
+		})
+	})
+
+	// Tests for extended model info parsing (common fields)
+	describe("extended model info parsing", () => {
+		it("parses context_window for context window", async () => {
+			const mockResponse = {
+				data: {
+					data: [
+						{
+							id: "extended-model",
+							context_window: 128000,
+							max_output_tokens: 16384,
+							supports_vision: true,
+							description: "A powerful vision model",
+						},
+					],
+				},
+			}
+
+			mockedAxios.get.mockResolvedValue(mockResponse)
+
+			const result = await getOpenAiModels({
+				baseUrl: "http://localhost:8080/v1",
+				apiKey: "test-api-key",
+			})
+
+			expect(result["extended-model"]).toMatchObject({
+				maxTokens: 16384,
+				contextWindow: 128000,
+				supportsImages: true,
+				description: "A powerful vision model",
+			})
+		})
+
+		it("parses context_length for context window", async () => {
+			const mockResponse = {
+				data: {
+					data: [
+						{
+							id: "alt-model",
+							context_length: 65536,
+							max_tokens: 4096,
+							supports_images: true,
+							supports_function_calling: true,
+						},
+					],
+				},
+			}
+
+			mockedAxios.get.mockResolvedValue(mockResponse)
+
+			const result = await getOpenAiModels({
+				baseUrl: "http://localhost:8080/v1",
+				apiKey: "test-api-key",
+			})
+
+			expect(result["alt-model"]).toMatchObject({
+				maxTokens: 4096,
+				contextWindow: 65536,
+				supportsImages: true,
+				supportsNativeTools: true,
+			})
+		})
+
+		it("parses pricing info when provided", async () => {
+			const mockResponse = {
+				data: {
+					data: [
+						{
+							id: "priced-model",
+							input_cost_per_token: 0.000003, // $3 per million
+							output_cost_per_token: 0.000015, // $15 per million
+						},
+					],
+				},
+			}
+
+			mockedAxios.get.mockResolvedValue(mockResponse)
+
+			const result = await getOpenAiModels({
+				baseUrl: "http://localhost:8080/v1",
+				apiKey: "test-api-key",
+			})
+
+			expect(result["priced-model"]).toMatchObject({
+				inputPrice: 3,
+				outputPrice: 15,
+			})
+		})
+
+		it("prioritizes context_window over context_length", async () => {
+			const mockResponse = {
+				data: {
+					data: [
+						{
+							id: "priority-model",
+							context_window: 200000, // Should use this
+							context_length: 100000, // Should ignore this
+							max_context_length: 50000, // Should ignore this
+						},
+					],
+				},
+			}
+
+			mockedAxios.get.mockResolvedValue(mockResponse)
+
+			const result = await getOpenAiModels({
+				baseUrl: "http://localhost:8080/v1",
+				apiKey: "test-api-key",
+			})
+
+			expect(result["priority-model"].contextWindow).toBe(200000)
+		})
+
+		it("prioritizes max_output_tokens over max_tokens", async () => {
+			const mockResponse = {
+				data: {
+					data: [
+						{
+							id: "tokens-model",
+							max_output_tokens: 64000, // Should use this
+							max_tokens: 8192, // Should ignore this
+						},
+					],
+				},
+			}
+
+			mockedAxios.get.mockResolvedValue(mockResponse)
+
+			const result = await getOpenAiModels({
+				baseUrl: "http://localhost:8080/v1",
+				apiKey: "test-api-key",
+			})
+
+			expect(result["tokens-model"].maxTokens).toBe(64000)
+		})
+
+		it("parses max_completion_tokens for output tokens", async () => {
+			const mockResponse = {
+				data: {
+					data: [
+						{
+							id: "completion-tokens-model",
+							context_window: 131072,
+							max_completion_tokens: 65536,
+						},
+					],
+				},
+			}
+
+			mockedAxios.get.mockResolvedValue(mockResponse)
+
+			const result = await getOpenAiModels({
+				baseUrl: "http://localhost:8080/v1",
+				apiKey: "test-api-key",
+			})
+
+			expect(result["completion-tokens-model"].maxTokens).toBe(65536)
+			expect(result["completion-tokens-model"].contextWindow).toBe(131072)
+		})
+
+		it("defaults supportsNativeTools to true when tool flags not provided", async () => {
+			const mockResponse = {
+				data: {
+					data: [
+						{
+							id: "no-tool-flags-model",
+							// No supports_function_calling or supports_tools
+						},
+					],
+				},
+			}
+
+			mockedAxios.get.mockResolvedValue(mockResponse)
+
+			const result = await getOpenAiModels({
+				baseUrl: "http://localhost:8080/v1",
+				apiKey: "test-api-key",
+			})
+
+			expect(result["no-tool-flags-model"].supportsNativeTools).toBe(true)
+		})
+
+		it("respects supports_tools=false when explicitly set", async () => {
+			const mockResponse = {
+				data: {
+					data: [
+						{
+							id: "no-tools-model",
+							supports_tools: false,
+						},
+					],
+				},
+			}
+
+			mockedAxios.get.mockResolvedValue(mockResponse)
+
+			const result = await getOpenAiModels({
+				baseUrl: "http://localhost:8080/v1",
+				apiKey: "test-api-key",
+			})
+
+			expect(result["no-tools-model"].supportsNativeTools).toBe(false)
+		})
+
+		it("parses vision flag correctly", async () => {
+			const mockResponse = {
+				data: {
+					data: [
+						{
+							id: "vision-model",
+							vision: true,
+						},
+					],
+				},
+			}
+
+			mockedAxios.get.mockResolvedValue(mockResponse)
+
+			const result = await getOpenAiModels({
+				baseUrl: "http://localhost:8080/v1",
+				apiKey: "test-api-key",
+			})
+
+			expect(result["vision-model"].supportsImages).toBe(true)
+		})
+	})
+})
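Reviewer note (illustrative sketch, not part of the patch): the spec above pins down the fetcher's HTTP contract: GET <baseUrl>/models with a 10-second timeout, trailing slashes trimmed from the base URL before the path is appended, a JSON content type, and an Authorization header only when an API key is configured. A minimal standalone TypeScript sketch of that contract, assuming only axios; the listModelIds helper name is hypothetical and exists only for this note:

	import axios from "axios"

	// Hypothetical helper mirroring the request contract asserted by the tests above.
	async function listModelIds(baseUrl: string, apiKey?: string): Promise<string[]> {
		// "http://host/v1" and "http://host/v1///" both resolve to "http://host/v1/models".
		const modelsUrl = `${baseUrl.replace(/\/+$/, "")}/models`
		const headers: Record<string, string> = { "Content-Type": "application/json" }
		if (apiKey) {
			headers.Authorization = `Bearer ${apiKey}` // only sent when a key is configured
		}
		const response = await axios.get(modelsUrl, { headers, timeout: 10_000 })
		// OpenAI-style /models responses wrap the model list in a "data" array.
		return (response.data?.data ?? []).map((model: { id: string }) => model.id)
	}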
diff --git a/src/api/providers/fetchers/modelCache.ts b/src/api/providers/fetchers/modelCache.ts
index f4c2ab1ae47..3551fc08856 100644
--- a/src/api/providers/fetchers/modelCache.ts
+++ b/src/api/providers/fetchers/modelCache.ts
@@ -40,6 +40,7 @@ import { getHuggingFaceModels } from "./huggingface"
 import { getRooModels } from "./roo"
 import { getChutesModels } from "./chutes"
 import { getNanoGptModels } from "./nano-gpt" //kilocode_change
+import { getOpenAiModels } from "./openai" // kilocode_change: OpenAI Compatible provider
 
 const memoryCache = new NodeCache({ stdTTL: 5 * 60, checkperiod: 5 * 60 })
 
@@ -173,6 +174,14 @@ async function fetchModelsFromProvider(options: GetModelsOptions): Promise<ModelRecord> {
+		// kilocode_change start - OpenAI Compatible provider
+		case "openai":
+			return getOpenAiModels({
+				baseUrl: options.baseUrl,
+				apiKey: options.apiKey,
+				headers: options.headers,
+			})
+		// kilocode_change end
diff --git a/src/api/providers/fetchers/openai.ts b/src/api/providers/fetchers/openai.ts
new file mode 100644
--- /dev/null
+++ b/src/api/providers/fetchers/openai.ts
+// kilocode_change - new file
+import axios from "axios"
+import { z } from "zod"
+
+import type { ModelInfo } from "@roo-code/types"
+
+const openAiModelSchema = z
+	.object({
+		id: z.string(),
+		object: z.string().optional(),
+		created: z.number().optional(),
+		owned_by: z.string().optional(),
+	})
+	.passthrough()
+
+const openAiModelsResponseSchema = z.object({
+	data: z.array(openAiModelSchema),
+	object: z.string().optional(),
+})
+
+type OpenAiModel = z.infer<typeof openAiModelSchema>
+
+/**
+ * Common extended model fields that OpenAI-compatible providers may include.
+ * These are NOT part of the official OpenAI spec but are widely used.
+ */
+interface ExtendedModelFields {
+	// Context/token limits (common naming conventions)
+	context_window?: number
+	context_length?: number
+	max_context_length?: number
+	max_input_tokens?: number
+	max_tokens?: number
+	max_output_tokens?: number
+	max_completion_tokens?: number
+	// Capability flags
+	supports_vision?: boolean
+	supports_images?: boolean
+	vision?: boolean
+	supports_function_calling?: boolean
+	supports_tools?: boolean
+	// Pricing (in cost per token)
+	input_cost_per_token?: number
+	output_cost_per_token?: number
+	// Description
+	description?: string
+}
+
+/**
+ * Parse an OpenAI model into our ModelInfo format.
+ *
+ * Handles common extended fields that many OpenAI-compatible providers include.
+ */
+function parseOpenAiModel(model: OpenAiModel): ModelInfo {
+	// Cast to access extended fields that some providers include
+	const extendedModel = model as OpenAiModel & ExtendedModelFields
+
+	// Parse context window from common field names
+	const contextWindow =
+		extendedModel.context_window ??
+		extendedModel.context_length ??
+		extendedModel.max_context_length ??
+		extendedModel.max_input_tokens ??
+		32000 // Safe default for most models
+
+	// Parse max output tokens from common field names
+	const maxTokens =
+		extendedModel.max_output_tokens ?? extendedModel.max_completion_tokens ?? extendedModel.max_tokens ?? 8192 // Conservative default
+
+	// Parse vision/image support
+	const supportsImages =
+		extendedModel.supports_vision ?? extendedModel.supports_images ?? extendedModel.vision ?? false
+
+	// Parse tool support (default to true as most modern models support tools)
+	const supportsNativeTools = extendedModel.supports_function_calling ?? extendedModel.supports_tools ?? true
+
+	// Parse pricing (convert from per-token to per-million-tokens if provided)
+	const inputPrice = extendedModel.input_cost_per_token ? extendedModel.input_cost_per_token * 1_000_000 : undefined
+	const outputPrice = extendedModel.output_cost_per_token
+		? extendedModel.output_cost_per_token * 1_000_000
+		: undefined
+
+	return {
+		maxTokens,
+		contextWindow,
+		supportsImages,
+		supportsPromptCache: false,
+		supportsComputerUse: false,
+		description: extendedModel.description ?? model.id,
+		// Use the full model ID as display name to avoid mangling by prettyModelName()
+		displayName: model.id,
+		supportsReasoningEffort: false,
+		supportsReasoningBudget: false,
+		supportsTemperature: true,
+		supportsNativeTools,
+		defaultToolProtocol: "native",
+		...(inputPrice !== undefined && { inputPrice }),
+		...(outputPrice !== undefined && { outputPrice }),
+	}
+}
+
+export interface GetOpenAiModelsOptions {
+	baseUrl?: string
+	apiKey?: string
+	headers?: Record<string, string>
+}
+
+export async function getOpenAiModels(options: GetOpenAiModelsOptions): Promise<Record<string, ModelInfo>> {
+	const models: Record<string, ModelInfo> = {}
+
+	// Note: baseUrl is required for OpenAI Compatible providers
+	// If not provided, we return empty models rather than defaulting to api.openai.com
+	// since users should explicitly configure their endpoint
+	if (!options.baseUrl) {
+		console.warn("OpenAI Compatible: No baseUrl provided, returning empty model list")
+		return models
+	}
+
+	const baseUrl = options.baseUrl
+
+	try {
+		const requestHeaders: Record<string, string> = {
+			"Content-Type": "application/json",
+			...options.headers,
+		}
+
+		if (options.apiKey) {
+			requestHeaders.Authorization = `Bearer ${options.apiKey}`
+		}
+
+		// Ensure baseUrl doesn't have trailing slash and append /models
+		const modelsUrl = `${baseUrl.replace(/\/+$/, "")}/models`
+
+		const response = await axios.get(modelsUrl, {
+			headers: requestHeaders,
+			timeout: 10_000,
+		})
+
+		const result = openAiModelsResponseSchema.safeParse(response.data)
+		if (!result.success) {
+			console.error("OpenAI Compatible models response validation failed:", result.error.format())
+			throw new Error(
+				`OpenAI Compatible API returned invalid response format. Validation errors: ${JSON.stringify(result.error.format())}`,
+			)
+		}
+
+		if (result.data.data.length === 0) {
+			console.warn(`OpenAI Compatible (${baseUrl}): API returned empty model list`)
+		}
+
+		for (const model of result.data.data) {
+			models[model.id] = parseOpenAiModel(model)
+		}
+
+		return models
+	} catch (error) {
+		console.error(`Error fetching OpenAI Compatible models from ${baseUrl}:`, error)
+
+		if (axios.isAxiosError(error)) {
+			if (error.code === "ECONNABORTED") {
+				const timeoutError = new Error(
+					`Failed to fetch OpenAI Compatible models from ${baseUrl}: Request timeout`,
+				)
+				;(timeoutError as any).cause = error
+				throw timeoutError
+			} else if (error.response) {
+				const responseError = new Error(
+					`Failed to fetch OpenAI Compatible models from ${baseUrl}: ${error.response.status} ${error.response.statusText}`,
+				)
+				;(responseError as any).cause = error
+				throw responseError
+			} else if (error.request) {
+				const requestError = new Error(`Failed to fetch OpenAI Compatible models from ${baseUrl}: No response`)
+				;(requestError as any).cause = error
+				throw requestError
+			}
+		}
+
+		const fetchError = new Error(
+			`Failed to fetch OpenAI Compatible models from ${baseUrl}: ${error instanceof Error ? error.message : "Unknown error"}`,
+		)
+		;(fetchError as any).cause = error
+		throw fetchError
+	}
+}
diff --git a/src/core/webview/__tests__/ClineProvider.spec.ts b/src/core/webview/__tests__/ClineProvider.spec.ts
index bb75ead1436..e3da0dda04f 100644
--- a/src/core/webview/__tests__/ClineProvider.spec.ts
+++ b/src/core/webview/__tests__/ClineProvider.spec.ts
@@ -2755,6 +2755,7 @@ describe("ClineProvider - Router Models", () => {
 			// kilocode_change start
 			geminiApiKey: "gemini-key",
 			googleGeminiBaseUrl: "https://gemini.example.com",
+			openAiApiKey: "openai-key", // kilocode_change: openai
 			nanoGptApiKey: "nano-gpt-key",
 			ovhCloudAiEndpointsApiKey: "ovhcloud-key",
 			inceptionLabsApiKey: "inception-key",
@@ -2823,6 +2824,7 @@ describe("ClineProvider - Router Models", () => {
 				deepinfra: mockModels,
 				openrouter: mockModels,
 				gemini: mockModels, // kilocode_change
+				openai: mockModels, // kilocode_change: openai
 				requesty: mockModels,
 				glama: mockModels, // kilocode_change
 				synthetic: mockModels, // kilocode_change
@@ -2861,6 +2863,7 @@ describe("ClineProvider - Router Models", () => {
 			chutesApiKey: "chutes-key",
 			geminiApiKey: "gemini-key",
 			googleGeminiBaseUrl: "https://gemini.example.com",
+			openAiApiKey: "openai-key", // kilocode_change: openai
 			nanoGptApiKey: "nano-gpt-key", // kilocode_change
 			ovhCloudAiEndpointsApiKey: "ovhcloud-key",
 			inceptionLabsApiKey: "inception-key",
@@ -2879,6 +2882,7 @@ describe("ClineProvider - Router Models", () => {
 		vi.mocked(getModels)
 			.mockResolvedValueOnce(mockModels) // openrouter success
 			.mockResolvedValueOnce(mockModels) // kilocode_change: gemini success
+			.mockRejectedValueOnce(new Error("OpenAI API error")) // kilocode_change: openai fail
 			.mockRejectedValueOnce(new Error("Requesty API error")) //
 			.mockResolvedValueOnce(mockModels) // kilocode_change glama success
 			.mockRejectedValueOnce(new Error("Unbound API error")) // unbound fail
@@ -2903,6 +2907,7 @@ describe("ClineProvider - Router Models", () => {
 				deepinfra: mockModels,
 				openrouter: mockModels,
 				gemini: mockModels, // kilocode_change
+				openai: {}, // kilocode_change: openai
 				requesty: {},
 				glama: mockModels, // kilocode_change
 				unbound: {},
@@ -2925,6 +2930,13 @@ describe("ClineProvider - Router Models", () => {
 		})
 
 		// Verify error messages were sent for failed providers
+		expect(mockPostMessage).toHaveBeenCalledWith({
+			type: "singleRouterModelFetchResponse",
+			success: false,
+			error: "OpenAI API error",
+			values: { provider: "openai" },
+		}) // kilocode_change: openai
+
 		expect(mockPostMessage).toHaveBeenCalledWith({
 			type: "singleRouterModelFetchResponse",
 			success: false,
@@ -3029,6 +3041,7 @@ describe("ClineProvider - Router Models", () => {
 			glamaApiKey: "glama-key", // kilocode_change
 			unboundApiKey: "unbound-key",
 			// kilocode_change start
+			openAiApiKey: "openai-key", // kilocode_change: openai
 			ovhCloudAiEndpointsApiKey: "ovhcloud-key",
 			chutesApiKey: "chutes-key",
 			nanoGptApiKey: "nano-gpt-key",
@@ -3059,6 +3072,7 @@ describe("ClineProvider - Router Models", () => {
 				deepinfra: mockModels,
 				openrouter: mockModels,
 				gemini: mockModels, // kilocode_change
+				openai: mockModels, // kilocode_change: openai
 				requesty: mockModels,
 				glama: mockModels, // kilocode_change
 				unbound: mockModels,
diff --git a/src/core/webview/__tests__/webviewMessageHandler.spec.ts b/src/core/webview/__tests__/webviewMessageHandler.spec.ts
index cf35a7040cb..89889f87f06 100644
--- a/src/core/webview/__tests__/webviewMessageHandler.spec.ts
+++ b/src/core/webview/__tests__/webviewMessageHandler.spec.ts
@@ -200,6 +200,7 @@ describe("webviewMessageHandler - requestRouterModels", () => {
 			ovhCloudAiEndpointsApiKey: "ovhcloud-key",
 			inceptionLabsApiKey: "inception-key",
 			inceptionLabsBaseUrl: "https://api.inceptionlabs.ai/v1/",
+			openAiApiKey: "openai-key",
 			// kilocode_change end
 		},
 	})
@@ -272,6 +273,7 @@ describe("webviewMessageHandler - requestRouterModels", () => {
 		routerModels: {
 			deepinfra: mockModels,
 			openrouter: mockModels,
+			openai: mockModels, // kilocode_change
 			gemini: mockModels, // kilocode_change
 			requesty: mockModels,
 			glama: mockModels, // kilocode_change
@@ -378,6 +380,7 @@ describe("webviewMessageHandler - requestRouterModels", () => {
 		routerModels: {
 			deepinfra: mockModels,
 			openrouter: mockModels,
+			openai: mockModels, // kilocode_change
 			gemini: mockModels, // kilocode_change
 			requesty: mockModels,
 			glama: mockModels, // kilocode_change
@@ -414,6 +417,7 @@ describe("webviewMessageHandler - requestRouterModels", () => {
 		// Mock some providers to succeed and others to fail
 		mockGetModels
 			.mockResolvedValueOnce(mockModels) // openrouter
+			.mockResolvedValueOnce(mockModels) // kilocode_change: openai
 			.mockResolvedValueOnce(mockModels) // kilocode_change: gemini
 			.mockRejectedValueOnce(new Error("Requesty API error")) // requesty
 			.mockResolvedValueOnce(mockModels) // kilocode_change: glama
@@ -485,6 +489,7 @@ describe("webviewMessageHandler - requestRouterModels", () => {
 		routerModels: {
 			deepinfra: mockModels,
 			openrouter: mockModels,
+			openai: mockModels, // kilocode_change
 			requesty: {},
 			glama: mockModels, // kilocode_change
 			unbound: {},
@@ -514,7 +519,8 @@ describe("webviewMessageHandler - requestRouterModels", () => {
 		// Mock providers to fail with different error types
 		mockGetModels
 			.mockRejectedValueOnce(new Error("Structured error message")) // openrouter
-			.mockRejectedValueOnce(new Error("Gemini API error")) // // kilocode_change: gemini
+			.mockRejectedValueOnce(new Error("Gemini API error")) // kilocode_change: gemini
+			.mockRejectedValueOnce(new Error("OpenAI API error")) // kilocode_change: openai
 			.mockRejectedValueOnce(new Error("Requesty API error")) // requesty
 			.mockRejectedValueOnce(new Error("Glama API error")) // kilocode_change: glama
 			.mockRejectedValueOnce(new Error("Unbound API error")) // unbound
@@ -543,6 +549,13 @@ describe("webviewMessageHandler - requestRouterModels", () => {
 		})
 
 		// kilocode_change start
+		expect(mockClineProvider.postMessageToWebview).toHaveBeenCalledWith({
+			type: "singleRouterModelFetchResponse",
+			success: false,
+			error: "OpenAI API error",
+			values: { provider: "openai" },
+		})
+
 		expect(mockClineProvider.postMessageToWebview).toHaveBeenCalledWith({
 			type: "singleRouterModelFetchResponse",
 			success: false,
diff --git a/src/core/webview/webviewMessageHandler.ts b/src/core/webview/webviewMessageHandler.ts
index 99deac1e3cb..c70d8720301 100644
--- a/src/core/webview/webviewMessageHandler.ts
+++ b/src/core/webview/webviewMessageHandler.ts
@@ -851,6 +851,7 @@ export const webviewMessageHandler = async (
 				inception: {},
 				kilocode: {},
 				gemini: {},
+				openai: {}, // OpenAI Compatible provider
 				// kilocode_change end
 				openrouter: {},
 				"vercel-ai-gateway": {},
@@ -901,6 +902,17 @@ export const webviewMessageHandler = async (
 						baseUrl: apiConfiguration.googleGeminiBaseUrl,
 					},
 				},
+				// kilocode_change start - OpenAI Compatible provider for chat panel model selection
+				{
+					key: "openai",
+					options: {
+						provider: "openai",
+						apiKey: apiConfiguration.openAiApiKey,
+						baseUrl: apiConfiguration.openAiBaseUrl,
+						headers: apiConfiguration.openAiHeaders,
+					},
+				},
+				// kilocode_change end
 				{
 					key: "requesty",
 					options: {
diff --git a/src/shared/api.ts b/src/shared/api.ts
index 2a956139238..238c2a97cd4 100644
--- a/src/shared/api.ts
+++ b/src/shared/api.ts
@@ -195,6 +195,7 @@ const dynamicProviderExtras = {
 	unbound: {} as { apiKey?: string },
 	// kilocode_change start
 	glama: {} as {}, // eslint-disable-line @typescript-eslint/no-empty-object-type
+	openai: {} as { apiKey?: string; baseUrl?: string; headers?: Record<string, string> }, // OpenAI Compatible provider
 	// kilocode_change end
 	"nano-gpt": {} as { nanoGptModelList?: "all" | "personalized" | "subscription" }, // kilocode_change
 	ollama: {} as { numCtx?: number }, // kilocode_change
diff --git a/webview-ui/src/components/kilocode/hooks/__tests__/getModelsByProvider.spec.ts b/webview-ui/src/components/kilocode/hooks/__tests__/getModelsByProvider.spec.ts
index d1fe30c8723..d733bb9be8e 100644
--- a/webview-ui/src/components/kilocode/hooks/__tests__/getModelsByProvider.spec.ts
+++ b/webview-ui/src/components/kilocode/hooks/__tests__/getModelsByProvider.spec.ts
@@ -42,6 +42,7 @@ describe("getModelsByProvider", () => {
 		synthetic: { "test-model": testModel },
 		inception: { "test-model": testModel },
 		roo: { "test-model": testModel },
+		openai: { "test-model": testModel },
 	}
 
 	it("returns models for all providers", () => {
@@ -50,7 +51,6 @@ describe("getModelsByProvider", () => {
 			"huggingface", // don't know what this is
 			"human-relay", // no models
 			"nano-gpt", // dynamic provider - models fetched from API
-			"openai", // not implemented
 			"roo", // don't care
 			"virtual-quota-fallback", // no models
 			"vercel-ai-gateway", // different structure
diff --git a/webview-ui/src/components/kilocode/hooks/useProviderModels.ts b/webview-ui/src/components/kilocode/hooks/useProviderModels.ts
index 9cf7a166bcd..44a34c343b1 100644
--- a/webview-ui/src/components/kilocode/hooks/useProviderModels.ts
+++ b/webview-ui/src/components/kilocode/hooks/useProviderModels.ts
@@ -174,9 +174,8 @@ export const getModelsByProvider = ({
 			}
 		}
 		case "openai": {
-			// TODO(catrielmuller): Support the fetch here
 			return {
-				models: {},
+				models: routerModels.openai || {},
 				defaultModel: "",
 			}
 		}
@@ -364,6 +363,10 @@ export const useProviderModels = (apiConfiguration?: ProviderSettings) => {
 		nanoGptModelList: apiConfiguration?.nanoGptModelList,
 		//kilocode_change end
 		syntheticApiKey: apiConfiguration?.syntheticApiKey, // kilocode_change
+		// OpenAI Compatible provider parameters
+		openAiApiKey: apiConfiguration?.openAiApiKey,
+		openAiBaseUrl: apiConfiguration?.openAiBaseUrl,
+		openAiHeaders: apiConfiguration?.openAiHeaders,
 	})
 
 	const options = getOptionsForProvider(provider, apiConfiguration)
diff --git a/webview-ui/src/components/ui/hooks/useRouterModels.ts b/webview-ui/src/components/ui/hooks/useRouterModels.ts
index bef96d7c5c0..f88975e2c96 100644
--- a/webview-ui/src/components/ui/hooks/useRouterModels.ts
+++ b/webview-ui/src/components/ui/hooks/useRouterModels.ts
@@ -66,6 +66,10 @@ type RouterModelsQueryKey = {
 	nanoGptApiKey?: string
 	nanoGptModelList?: "all" | "personalized" | "subscription"
 	syntheticApiKey?: string
+	// OpenAI Compatible provider parameters
+	openAiApiKey?: string
+	openAiBaseUrl?: string
+	openAiHeaders?: Record<string, string>
 	// Requesty, Unbound, etc should perhaps also be here, but they already have their own hacks for reloading
 }
 // kilocode_change end
diff --git a/webview-ui/src/utils/__tests__/prettyModelName.spec.ts b/webview-ui/src/utils/__tests__/prettyModelName.spec.ts
new file mode 100644
index 00000000000..e4d2c84c2a2
--- /dev/null
+++ b/webview-ui/src/utils/__tests__/prettyModelName.spec.ts
@@ -0,0 +1,67 @@
+import { prettyModelName } from "../prettyModelName"
+
+describe("prettyModelName", () => {
+	it("should return empty string for empty input", () => {
+		expect(prettyModelName("")).toBe("")
+	})
+
+	it("should handle simple model name without slashes", () => {
+		// The function only capitalizes first character of project name, not each word
+		expect(prettyModelName("gpt-4")).toBe("Gpt 4 ")
+	})
+
+	it("should handle model name with single slash", () => {
+		expect(prettyModelName("anthropic/claude-3")).toBe("Anthropic / Claude 3 ")
+	})
+
+	it("should handle model name with colon tag", () => {
+		expect(prettyModelName("anthropic/claude-3:thinking")).toBe("Anthropic / Claude 3 (Thinking)")
+	})
+
+	it("should handle model name with multiple slashes - the key fix", () => {
+		// This is the key bug fix - model IDs like "chutes/moonshotai/Kimi-K2-Instruct"
+		// should preserve everything after the first slash
+		// With the fix, we now get the full model name preserved
+		const result = prettyModelName("chutes/moonshotai/Kimi-K2-Instruct")
+		// Should contain the full model name, not just "Chutes / Moonshotai"
+		expect(result).toContain("Chutes")
+		expect(result).toContain("Moonshotai")
+		expect(result).toContain("Kimi")
+		expect(result).toContain("K2")
+		expect(result).toContain("Instruct")
+	})
+
+	it("should handle real Chutes model IDs with single slash", () => {
+		// Actual Chutes API model ID format
+		expect(prettyModelName("moonshotai/Kimi-K2-Instruct-0905")).toBe("Moonshotai / Kimi K2 Instruct 0905 ")
+	})
+
+	it("should handle NousResearch model IDs", () => {
+		// The function preserves the case in the word after capitalizing first char
+		expect(prettyModelName("NousResearch/Hermes-4-70B")).toBe("NousResearch / Hermes 4 70B ")
+	})
+
+	it("should handle Qwen model IDs", () => {
+		expect(prettyModelName("Qwen/Qwen3-235B-A22B")).toBe("Qwen / Qwen3 235B A22B ")
+	})
+
+	it("should handle model ID with multiple slashes and tag", () => {
+		const result = prettyModelName("provider/org/model-name:variant")
+		expect(result).toContain("Provider")
+		expect(result).toContain("Org")
+		// Note: The function only capitalizes first char of each dash-word, so "model" stays lowercase
+		expect(result).toContain("model")
+		expect(result).toContain("Name")
+		expect(result).toContain("(Variant)")
+	})
+
+	it("should handle deepseek model IDs with dash in project name", () => {
+		// Note: project name with dash is just capitalized first char, doesn't split on dash
+		expect(prettyModelName("deepseek-ai/DeepSeek-R1-0528")).toBe("Deepseek-ai / DeepSeek R1 0528 ")
+	})
+
+	it("should preserve uppercase in model names", () => {
+		// The function capitalizes first char of each dash-separated word
+		expect(prettyModelName("openai/GPT-4o")).toBe("Openai / GPT 4o ")
+	})
+})
diff --git a/webview-ui/src/utils/__tests__/validate.test.ts b/webview-ui/src/utils/__tests__/validate.test.ts
index d3ec33b91c8..793d7e867d0 100644
--- a/webview-ui/src/utils/__tests__/validate.test.ts
+++ b/webview-ui/src/utils/__tests__/validate.test.ts
@@ -73,6 +73,7 @@ describe("Model Validation Functions", () => {
 		// kilocode_change end
 		roo: {},
 		chutes: {},
+		openai: {},
 	}
 
 	const allowAllOrganization: OrganizationAllowList = {
diff --git a/webview-ui/src/utils/prettyModelName.ts b/webview-ui/src/utils/prettyModelName.ts
index 86af618803d..8855f767986 100644
--- a/webview-ui/src/utils/prettyModelName.ts
+++ b/webview-ui/src/utils/prettyModelName.ts
@@ -5,7 +5,9 @@ export const prettyModelName = (modelId: string): string => {
 	const [mainId, tag] = modelId.split(":")
 
 	const projectName = mainId.includes("/") ? mainId.split("/")[0] : ""
-	const modelName = mainId.includes("/") ? mainId.split("/")[1] : mainId
+	// Use slice(1).join("/") instead of split("/")[1] to preserve everything after the first slash
+	// This handles model IDs with multiple slashes like "provider/org/model-name"
+	const modelName = mainId.includes("/") ? mainId.split("/").slice(1).join("/") : mainId
 
 	// Capitalize each word and join with spaces
 	const formattedProject = projectName ? projectName.charAt(0).toUpperCase() + projectName.slice(1) : ""
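Reviewer note (illustrative sketch, not part of the patch): the one-line change above is easiest to see on a multi-slash ID. split("/")[1] keeps only the first segment after the provider prefix, while slice(1).join("/") keeps everything after the first slash:

	const mainId = "chutes/moonshotai/Kimi-K2-Instruct"
	const before = mainId.split("/")[1] // "moonshotai" (the actual model name is dropped)
	const after = mainId.split("/").slice(1).join("/") // "moonshotai/Kimi-K2-Instruct"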