diff --git a/.changeset/disable-questions-yolo-mode.md b/.changeset/disable-questions-yolo-mode.md index f8604768e2f..1d60e4395a8 100644 --- a/.changeset/disable-questions-yolo-mode.md +++ b/.changeset/disable-questions-yolo-mode.md @@ -3,6 +3,7 @@ --- Disable ask_followup_question tool when yolo mode is enabled to prevent the agent from asking itself questions and auto-answering them. Applied to: + - XML tool descriptions (system prompt) - Native tool filtering - Tool execution (returns error message if model still tries to use the tool from conversation history) diff --git a/.changeset/openai-compatible-settings-improvements.md b/.changeset/openai-compatible-settings-improvements.md new file mode 100644 index 00000000000..c2d051eb2fd --- /dev/null +++ b/.changeset/openai-compatible-settings-improvements.md @@ -0,0 +1,10 @@ +--- +"kilo-code": minor +--- + +Add reasoning and capability controls for OpenAI Compatible models + +- Added checkboxes for 'Supports Reasoning', 'Supports Function Calling', and 'Supports Computer Use' to the OpenAI Compatible settings UI. +- Compacted the capability checkboxes into a 2-column grid layout with tooltip-only descriptions. +- Updated OpenAiHandler to inject the 'thinking' parameter when reasoning is enabled and the model supports it. +- Gated tool inclusion based on the 'supportsNativeTools' flag. 
diff --git a/.github/workflows/code-qa.yml b/.github/workflows/code-qa.yml index 5d5733d025e..3eca6541b3c 100644 --- a/.github/workflows/code-qa.yml +++ b/.github/workflows/code-qa.yml @@ -186,6 +186,7 @@ jobs: libxkbfile-dev \ pkg-config \ build-essential \ + libkrb5-dev \ python3 - name: Turbo cache setup uses: actions/cache@v4 diff --git a/.github/workflows/marketplace-publish.yml b/.github/workflows/marketplace-publish.yml index a9777273e84..93751004c8a 100644 --- a/.github/workflows/marketplace-publish.yml +++ b/.github/workflows/marketplace-publish.yml @@ -201,6 +201,7 @@ jobs: libxkbfile-dev \ pkg-config \ build-essential \ + libkrb5-dev \ python3 - name: Turbo cache setup uses: actions/cache@v4 diff --git a/AGENTS.md b/AGENTS.md index e7ca48bdc38..1c147c015c6 100644 --- a/AGENTS.md +++ b/AGENTS.md @@ -14,6 +14,7 @@ This is a pnpm monorepo using Turbo for task orchestration: - **`apps/`** - E2E tests, Storybook, docs Key source directories: + - `src/api/providers/` - AI provider implementations (50+ providers) - `src/core/tools/` - Tool implementations (ReadFile, ApplyDiff, ExecuteCommand, etc.) - `src/services/` - Services (MCP, browser, checkpoints, code-index) @@ -67,11 +68,13 @@ Kilo Code is a fork of [Roo Code](https://github.com/RooVetGit/Roo-Code). We per To minimize merge conflicts when syncing with upstream, mark Kilo Code-specific changes in shared code with `kilocode_change` comments. **Single line:** + ```typescript const value = 42 // kilocode_change ``` **Multi-line:** + ```typescript // kilocode_change start const foo = 1 @@ -80,6 +83,7 @@ const bar = 2 ``` **New files:** + ```typescript // kilocode_change - new file ``` diff --git a/CHANGELOG.md b/CHANGELOG.md index e2f61f2dbba..8fcc5dcf9d5 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,9 @@ # kilo-code +## 4.143.21 + +- Fix JetBrains plugin build by handling missing gradle.properties in CI check script. 
Thanks @janpaul123, @kevinvandijk, @chrarnoldus, @hassoncs, @markijbema, @catrielmuller! - Fixes #3271 + ## 4.143.2 ### Patch Changes diff --git a/apps/kilocode-docs/docs/advanced-usage/agent-manager.md b/apps/kilocode-docs/docs/advanced-usage/agent-manager.md index 237bb5d65fb..361d79056b9 100644 --- a/apps/kilocode-docs/docs/advanced-usage/agent-manager.md +++ b/apps/kilocode-docs/docs/advanced-usage/agent-manager.md @@ -90,20 +90,22 @@ The Agent Manager requires proper authentication for full functionality, includi ### Supported Authentication Methods 1. **Kilo Code Extension (Recommended)** - - Sign in through the extension settings - - Provides seamless authentication for the Agent Manager - - Enables session syncing and cloud features + + - Sign in through the extension settings + - Provides seamless authentication for the Agent Manager + - Enables session syncing and cloud features 2. **CLI with Kilo Code Provider** - - Use the CLI configured with `kilocode` as the provider - - Run `kilocode config` to set up authentication - - See [CLI setup](/cli) for details + - Use the CLI configured with `kilocode` as the provider + - Run `kilocode config` to set up authentication + - See [CLI setup](/cli) for details ### BYOK Limitations **Important:** Bring Your Own Key (BYOK) is not yet supported with the Agent Manager. 
If you're using BYOK with providers like Anthropic, OpenAI, or OpenRouter: + - The Agent Manager will not have access to cloud-synced sessions - Session syncing features will be unavailable - You must use one of the supported authentication methods above for full functionality diff --git a/apps/kilocode-docs/docs/providers/bedrock.md b/apps/kilocode-docs/docs/providers/bedrock.md index 3f7e46c8e6b..bb7eae0730b 100644 --- a/apps/kilocode-docs/docs/providers/bedrock.md +++ b/apps/kilocode-docs/docs/providers/bedrock.md @@ -4,7 +4,7 @@ sidebar_label: AWS Bedrock # Using AWS Bedrock With Kilo Code -Kilo Code supports accessing models through Amazon Bedrock, a fully managed service that makes a selection of high-performing foundation models (FMs) from leading AI companies available via a single API. This provider connects directly to AWS Bedrock and authenticates with the provided credentials. +Kilo Code supports accessing models through Amazon Bedrock, a fully managed service that makes a selection of high-performing foundation models (FMs) from leading AI companies available via a single API. This provider connects directly to AWS Bedrock and authenticates with the provided credentials. **Website:** [https://aws.amazon.com/bedrock/](https://aws.amazon.com/bedrock/) diff --git a/cli/docs/DEVELOPMENT.md b/cli/docs/DEVELOPMENT.md index ffeb6f1b05d..a61c0b2d279 100644 --- a/cli/docs/DEVELOPMENT.md +++ b/cli/docs/DEVELOPMENT.md @@ -7,25 +7,29 @@ We use `pnpm` for package management. Please make sure `pnpm` is installed. The CLI is currently built by bundling the extension core and replacing the vscode rendering parts with a cli rendering engine. To _develop_ on the CLI you need to follow a few steps: 1. Install dependencies from the root workspace folder: - ```bash - pnpm install - ``` + + ```bash + pnpm install + ``` 2. Set up your environment file. 
Copy the sample and configure your API keys: - ```bash - cp .env.sample cli/dist/.env - # Edit cli/dist/.env with your API keys - ``` + + ```bash + cp .env.sample cli/dist/.env + # Edit cli/dist/.env with your API keys + ``` 3. Build the extension core from the root workspace folder: - ```bash - pnpm cli:bundle - ``` + + ```bash + pnpm cli:bundle + ``` 4. Change into the cli folder: - ```bash - cd ./cli - ``` + + ```bash + cd ./cli + ``` 5. Build & run the extension by running `pnpm start:dev`. If you want to use the CLI to work on its own code, you can run `pnpm start:dev -w ../` which will start it within the root workspace folder. diff --git a/docs/context-window-autofill.md b/docs/context-window-autofill.md new file mode 100644 index 00000000000..f9fa0d257c5 --- /dev/null +++ b/docs/context-window-autofill.md @@ -0,0 +1,45 @@ +# Context Window Auto-fill Feature + +## Objective + +Implement an auto-fill feature for the context window and other model capabilities in the OpenAI Compatible settings. + +## Changes + +### Backend + +1. **`src/shared/WebviewMessage.ts`**: + + - Added `requestOpenAiModelInfo` to `WebviewMessage` type. + - This message allows the frontend to request model information based on the selected model ID. + +2. **`src/shared/ExtensionMessage.ts`**: + + - Added `openAiModelInfo` to `ExtensionMessage` type. + - This property carries the `ModelInfo` payload back to the frontend. + +3. **`src/api/providers/openai.ts`**: + + - Imported known model maps (`openAiNativeModels`, `anthropicModels`, etc.) from `@roo-code/types`. + - Added `getOpenAiModelInfo(modelId: string)` helper function. + - This function iterates through known model maps to find and return the `ModelInfo` for a given model ID. + +4. **`src/core/webview/webviewMessageHandler.ts`**: + - Added a handler for `requestOpenAiModelInfo`. + - It calls `getOpenAiModelInfo` and sends back an `openAiModelInfo` message with the result. + +### Frontend + +1.
**`webview-ui/src/i18n/locales/en/settings.json`**: + + - Added `"autoFill": "Auto-fill"` translation key. + +2. **`webview-ui/src/components/settings/providers/OpenAICompatible.tsx`**: + - Imported `vscode` utility for message passing. + - Implemented `handleAutoFill` function that sends `requestOpenAiModelInfo`. + - Added a listener in `onMessage` to handle `openAiModelInfo` response and update `openAiCustomModelInfo` state. + - Added an "Auto-fill" button in the "Model Capabilities" section header. + +## Verification + +- Ran `pnpm check-types` successfully, confirming type safety across the monorepo. diff --git a/jetbrains/scripts/check-dependencies.js b/jetbrains/scripts/check-dependencies.js index 244998e427e..8ad70fd4aa4 100755 --- a/jetbrains/scripts/check-dependencies.js +++ b/jetbrains/scripts/check-dependencies.js @@ -309,6 +309,21 @@ function checkBuildSystem() { const gradlew = path.join(pluginDir, process.platform === "win32" ? "gradlew.bat" : "gradlew") const buildGradle = path.join(pluginDir, "build.gradle.kts") const gradleProps = path.join(pluginDir, "gradle.properties") + const gradlePropsTemplate = path.join(pluginDir, "gradle.properties.template") + + // Auto-generate gradle.properties from template if missing + if (!fs.existsSync(gradleProps) && fs.existsSync(gradlePropsTemplate)) { + try { + printWarning("gradle.properties is missing, generating from template...") + let content = fs.readFileSync(gradlePropsTemplate, "utf8") + // Use a default version for CI check - strict version sync happens later via sync:version + content = content.replace("{{VERSION}}", "0.0.0-dev") + fs.writeFileSync(gradleProps, content) + printFix("Generated gradle.properties from template") + } catch (error) { + printError(`Failed to generate gradle.properties: ${error.message}`) + } + } if (fs.existsSync(gradlew) && fs.existsSync(buildGradle) && fs.existsSync(gradleProps)) { printSuccess("Gradle build system is configured") diff --git a/src/api/providers/openai.ts 
b/src/api/providers/openai.ts index e1e6c66b24a..ce81eb92dd6 100644 --- a/src/api/providers/openai.ts +++ b/src/api/providers/openai.ts @@ -8,6 +8,14 @@ import { openAiModelInfoSaneDefaults, DEEP_SEEK_DEFAULT_TEMPERATURE, OPENAI_AZURE_AI_INFERENCE_PATH, + openAiNativeModels, + anthropicModels, + geminiModels, + mistralModels, + deepSeekModels, + qwenCodeModels, + vertexModels, + bedrockModels, } from "@roo-code/types" import type { ApiHandlerOptions } from "../../shared/api" @@ -164,11 +172,17 @@ export class OpenAiHandler extends BaseProvider implements SingleCompletionHandl stream: true as const, ...(isGrokXAI ? {} : { stream_options: { include_usage: true } }), ...(reasoning && reasoning), - ...(metadata?.tools && { tools: this.convertToolsForOpenAI(metadata.tools) }), - ...(metadata?.tool_choice && { tool_choice: metadata.tool_choice }), - ...(metadata?.toolProtocol === "native" && { - parallel_tool_calls: metadata.parallelToolCalls ?? false, - }), + ...((this.options.enableReasoningEffort && modelInfo.supportsReasoningBinary + ? { thinking: { type: "enabled" } } + : {}) as any), + ...(metadata?.tools && + modelInfo.supportsNativeTools !== false && { tools: this.convertToolsForOpenAI(metadata.tools) }), + ...(metadata?.tool_choice && + modelInfo.supportsNativeTools !== false && { tool_choice: metadata.tool_choice }), + ...(metadata?.toolProtocol === "native" && + modelInfo.supportsNativeTools !== false && { + parallel_tool_calls: metadata.parallelToolCalls ?? false, + }), } // Add max_tokens if needed @@ -251,11 +265,17 @@ export class OpenAiHandler extends BaseProvider implements SingleCompletionHandl : enabledLegacyFormat ? 
[systemMessage, ...convertToSimpleMessages(messages)] : [systemMessage, ...convertToOpenAiMessages(messages)], - ...(metadata?.tools && { tools: this.convertToolsForOpenAI(metadata.tools) }), - ...(metadata?.tool_choice && { tool_choice: metadata.tool_choice }), - ...(metadata?.toolProtocol === "native" && { - parallel_tool_calls: metadata.parallelToolCalls ?? false, - }), + ...(metadata?.tools && + modelInfo.supportsNativeTools !== false && { tools: this.convertToolsForOpenAI(metadata.tools) }), + ...(metadata?.tool_choice && + modelInfo.supportsNativeTools !== false && { tool_choice: metadata.tool_choice }), + ...(metadata?.toolProtocol === "native" && + modelInfo.supportsNativeTools !== false && { + parallel_tool_calls: metadata.parallelToolCalls ?? false, + }), + ...((this.options.enableReasoningEffort && modelInfo.supportsReasoningBinary + ? { thinking: { type: "enabled" } } + : {}) as any), } // Add max_tokens if needed @@ -387,11 +407,17 @@ export class OpenAiHandler extends BaseProvider implements SingleCompletionHandl ...(isGrokXAI ? {} : { stream_options: { include_usage: true } }), reasoning_effort: modelInfo.reasoningEffort as "low" | "medium" | "high" | undefined, temperature: undefined, - ...(metadata?.tools && { tools: this.convertToolsForOpenAI(metadata.tools) }), - ...(metadata?.tool_choice && { tool_choice: metadata.tool_choice }), - ...(metadata?.toolProtocol === "native" && { - parallel_tool_calls: metadata.parallelToolCalls ?? false, - }), + ...(metadata?.tools && + modelInfo.supportsNativeTools !== false && { tools: this.convertToolsForOpenAI(metadata.tools) }), + ...(metadata?.tool_choice && + modelInfo.supportsNativeTools !== false && { tool_choice: metadata.tool_choice }), + ...(metadata?.toolProtocol === "native" && + modelInfo.supportsNativeTools !== false && { + parallel_tool_calls: metadata.parallelToolCalls ?? false, + }), + ...((this.options.enableReasoningEffort && modelInfo.supportsReasoningBinary + ? 
{ thinking: { type: "enabled" } } + : {}) as any), } // O3 family models do not support the deprecated max_tokens parameter @@ -422,11 +448,17 @@ export class OpenAiHandler extends BaseProvider implements SingleCompletionHandl ], reasoning_effort: modelInfo.reasoningEffort as "low" | "medium" | "high" | undefined, temperature: undefined, - ...(metadata?.tools && { tools: this.convertToolsForOpenAI(metadata.tools) }), - ...(metadata?.tool_choice && { tool_choice: metadata.tool_choice }), - ...(metadata?.toolProtocol === "native" && { - parallel_tool_calls: metadata.parallelToolCalls ?? false, - }), + ...(metadata?.tools && + modelInfo.supportsNativeTools !== false && { tools: this.convertToolsForOpenAI(metadata.tools) }), + ...(metadata?.tool_choice && + modelInfo.supportsNativeTools !== false && { tool_choice: metadata.tool_choice }), + ...(metadata?.toolProtocol === "native" && + modelInfo.supportsNativeTools !== false && { + parallel_tool_calls: metadata.parallelToolCalls ?? false, + }), + ...((this.options.enableReasoningEffort && modelInfo.supportsReasoningBinary + ? { thinking: { type: "enabled" } } + : {}) as any), } // O3 family models do not support the deprecated max_tokens parameter @@ -574,3 +606,60 @@ export async function getOpenAiModels(baseUrl?: string, apiKey?: string, openAiH return [] } } + +export function getOpenAiModelInfo(modelId: string): ModelInfo | undefined { + const models: Record[] = [ + openAiNativeModels, + anthropicModels, + geminiModels, + mistralModels, + deepSeekModels, + qwenCodeModels, + vertexModels, + bedrockModels, + ] + + // Helper function to sanitize and return model info + const sanitizeModelInfo = (info: ModelInfo): ModelInfo => { + if (info.tiers) { + return { + ...info, + tiers: info.tiers.map((tier) => ({ + ...tier, + // Replace Infinity/null with Number.MAX_SAFE_INTEGER (essentially unlimited) + contextWindow: + tier.contextWindow === Infinity || tier.contextWindow === null + ? 
Number.MAX_SAFE_INTEGER + : tier.contextWindow, + })), + } + } + return info + } + + // Try exact match first + for (const modelMap of models) { + if (modelId in modelMap) { + return sanitizeModelInfo(modelMap[modelId]) + } + } + + // Normalize search ID: remove provider prefix (e.g., "google/", "anthropic/") and convert to lowercase + const normalizedSearchId = modelId.replace(/^[a-z-]+\//i, "").toLowerCase() + + // Try fuzzy matching: find models where the key contains the normalized search ID + // or where the normalized search ID contains the model key's base name + for (const modelMap of models) { + const keys = Object.keys(modelMap) + for (const key of keys) { + const normalizedKey = key.toLowerCase() + // Check if key contains search term or search term contains key's base (without date suffix) + const keyBase = normalizedKey.replace(/-\d{8}$/, "") // Remove date suffixes like -20241022 + if (normalizedKey.includes(normalizedSearchId) || normalizedSearchId.includes(keyBase)) { + return sanitizeModelInfo(modelMap[key]) + } + } + } + + return undefined +} diff --git a/src/core/webview/webviewMessageHandler.ts b/src/core/webview/webviewMessageHandler.ts index c751d402bda..6a6fc333d6a 100644 --- a/src/core/webview/webviewMessageHandler.ts +++ b/src/core/webview/webviewMessageHandler.ts @@ -70,7 +70,7 @@ import { showSystemNotification } from "../../integrations/notifications" // kil import { singleCompletionHandler } from "../../utils/single-completion-handler" // kilocode_change import { searchCommits } from "../../utils/git" import { exportSettings, importSettingsWithFeedback } from "../config/importExport" -import { getOpenAiModels } from "../../api/providers/openai" +import { getOpenAiModels, getOpenAiModelInfo } from "../../api/providers/openai" import { getVsCodeLmModels } from "../../api/providers/vscode-lm" import { openMention } from "../mentions" import { getWorkspacePath } from "../../utils/path" @@ -893,14 +893,7 @@ export const webviewMessageHandler 
= async ( key: "openrouter", options: { provider: "openrouter", apiKey: openRouterApiKey, baseUrl: openRouterBaseUrl }, }, - { - key: "gemini", - options: { - provider: "gemini", - apiKey: apiConfiguration.geminiApiKey, - baseUrl: apiConfiguration.googleGeminiBaseUrl, - }, - }, + { key: "requesty", options: { @@ -919,7 +912,7 @@ export const webviewMessageHandler = async ( kilocodeOrganizationId: apiConfiguration.kilocodeOrganizationId, }, }, - { key: "ollama", options: { provider: "ollama", baseUrl: apiConfiguration.ollamaBaseUrl } }, + { key: "vercel-ai-gateway", options: { provider: "vercel-ai-gateway" } }, { key: "deepinfra", @@ -998,6 +991,26 @@ export const webviewMessageHandler = async ( }) } + // Gemini is conditional - only fetch if user is actually using Gemini provider AND has API key + if (apiConfiguration.apiProvider === "gemini" && apiConfiguration.geminiApiKey) { + candidates.push({ + key: "gemini", + options: { + provider: "gemini", + apiKey: apiConfiguration.geminiApiKey, + baseUrl: apiConfiguration.googleGeminiBaseUrl, + }, + }) + } + + // Ollama is conditional - only fetch if user is actually using Ollama provider + if (apiConfiguration.apiProvider === "ollama" && apiConfiguration.ollamaBaseUrl) { + candidates.push({ + key: "ollama", + options: { provider: "ollama", baseUrl: apiConfiguration.ollamaBaseUrl }, + }) + } + // Apply single provider filter if specified const modelFetchPromises = providerFilter ? 
candidates.filter(({ key }) => key === providerFilter) @@ -1144,7 +1157,7 @@ export const webviewMessageHandler = async ( break } case "requestOpenAiModels": - if (message?.values?.baseUrl && message?.values?.apiKey) { + if (message?.values?.baseUrl) { const openAiModels = await getOpenAiModels( message?.values?.baseUrl, message?.values?.apiKey, @@ -1154,6 +1167,115 @@ export const webviewMessageHandler = async ( provider.postMessageToWebview({ type: "openAiModels", openAiModels }) } + break + case "requestOpenAiModelInfo": + if (message?.values?.openAiModelId) { + let modelInfo: ReturnType + + // Primary: Try OpenRouter first (most comprehensive and up-to-date) + try { + const { getOpenRouterModels } = await import("../../api/providers/fetchers/openrouter") + // If forceRefresh is true (manual Auto-fill click), flush cache first + if (message?.values?.forceRefresh) { + await flushModels("openrouter", true) + } + const openRouterModels = await getOpenRouterModels() + modelInfo = openRouterModels[message.values.openAiModelId] + + if (!modelInfo) { + const searchId = message.values.openAiModelId.toLowerCase() + // Normalize search ID by removing provider prefix + const normalizedSearchId = searchId.replace(/^[a-z-]+\//i, "") + const keys = Object.keys(openRouterModels) + // Find matches where either: + // 1. OpenRouter ID contains the search term + // 2. 
Normalized OpenRouter ID (without provider prefix) matches normalized search + const matches = keys.filter((id) => { + const lowerId = id.toLowerCase() + const normalizedId = lowerId.replace(/^[a-z-]+\//, "") // Remove provider prefix + return ( + lowerId.includes(searchId) || + normalizedId.includes(normalizedSearchId) || + normalizedSearchId.includes(normalizedId) + ) + }) + + if (matches.length > 0) { + // Sort by length to find the most concise match (often the base model) + matches.sort((a, b) => a.length - b.length) + modelInfo = openRouterModels[matches[0]] + } + } + } catch (error) { + console.error("Error fetching OpenRouter models for auto-fill:", error) + } + + // Merge: Get additional data from static maps (computer use, image support, etc.) + const staticModelInfo = getOpenAiModelInfo(message.values.openAiModelId) + if (modelInfo && staticModelInfo) { + // Merge static map data into OpenRouter data (static has curated capability flags) + modelInfo = { + ...modelInfo, + // Override with static map values if they provide additional capability info + supportsComputerUse: staticModelInfo.supportsComputerUse ?? modelInfo.supportsComputerUse, + supportsImages: staticModelInfo.supportsImages ?? modelInfo.supportsImages, + supportsNativeTools: staticModelInfo.supportsNativeTools ?? modelInfo.supportsNativeTools, + supportsPromptCache: staticModelInfo.supportsPromptCache ?? modelInfo.supportsPromptCache, + supportsReasoningBudget: + staticModelInfo.supportsReasoningBudget ?? modelInfo.supportsReasoningBudget, + supportsReasoningBinary: + staticModelInfo.supportsReasoningBinary ?? 
modelInfo.supportsReasoningBinary, + } + } else if (!modelInfo) { + // Fallback: Use static map if OpenRouter returned nothing + modelInfo = staticModelInfo + } + + // Heuristic: Auto-detect capabilities from model ID + const lowerModelId = message.values.openAiModelId.toLowerCase() + if (!modelInfo) { + modelInfo = {} as any + } + + // Only assign if modelInfo is now defined (it is, due to above check) + if (modelInfo) { + // Clone to avoid mutating shared state if it comes from a const + modelInfo = { ...modelInfo } + + if (lowerModelId.includes("computer")) { + modelInfo.supportsComputerUse = true + } + if ( + lowerModelId.includes("vision") || + lowerModelId.includes("vl") || + lowerModelId.includes("omni") || + lowerModelId.includes("gemini") || + lowerModelId.includes("gpt-4o") + ) { + modelInfo.supportsImages = true + } + if ( + lowerModelId.includes("reasoner") || + lowerModelId.includes("thinking") || + lowerModelId.includes("r1") || + lowerModelId.includes("o1") || + lowerModelId.includes("o3") + ) { + modelInfo.supportsReasoningBinary = true + } + } + + // Sanitize tiers to ensure contextWindow is not null (which breaks validation) + if (modelInfo?.tiers) { + modelInfo.tiers = modelInfo.tiers.map((tier: any) => ({ + ...tier, + contextWindow: tier.contextWindow === null ? 
undefined : tier.contextWindow, + })) + } + + // Always send response so UI knows the result (found or not found) + provider.postMessageToWebview({ type: "openAiModelInfo", openAiModelInfo: modelInfo }) + } break case "requestVsCodeLmModels": const vsCodeLmModels = await getVsCodeLmModels() diff --git a/src/package.json b/src/package.json index be31d7590f7..34b4da1e433 100644 --- a/src/package.json +++ b/src/package.json @@ -3,7 +3,7 @@ "displayName": "%extension.displayName%", "description": "%extension.description%", "publisher": "kilocode", - "version": "4.143.2", + "version": "4.143.21", "icon": "assets/icons/logo-outline-black.png", "galleryBanner": { "color": "#FFFFFF", diff --git a/src/shared/ExtensionMessage.ts b/src/shared/ExtensionMessage.ts index 20241d6347f..634f0f51fe5 100644 --- a/src/shared/ExtensionMessage.ts +++ b/src/shared/ExtensionMessage.ts @@ -97,6 +97,7 @@ export interface ExtensionMessage { | "listApiConfig" | "routerModels" | "openAiModels" + | "openAiModelInfo" // kilocode_change: Auto-filled model info from known databases | "ollamaModels" | "lmStudioModels" | "vsCodeLmModels" @@ -242,6 +243,7 @@ export interface ExtensionMessage { clineMessage?: ClineMessage routerModels?: RouterModels openAiModels?: string[] + openAiModelInfo?: ModelInfo // kilocode_change: Auto-filled model info ollamaModels?: ModelRecord lmStudioModels?: ModelRecord vsCodeLmModels?: { vendor?: string; family?: string; version?: string; id?: string }[] diff --git a/src/shared/WebviewMessage.ts b/src/shared/WebviewMessage.ts index 8d53b561388..f0a9d88956d 100644 --- a/src/shared/WebviewMessage.ts +++ b/src/shared/WebviewMessage.ts @@ -83,6 +83,7 @@ export interface WebviewMessage { | "flushRouterModels" | "requestRouterModels" | "requestOpenAiModels" + | "requestOpenAiModelInfo" // kilocode_change: Auto-fill model info from known databases | "requestOllamaModels" | "requestLmStudioModels" | "requestRooModels" diff --git 
a/webview-ui/src/components/settings/providers/OpenAICompatible.tsx b/webview-ui/src/components/settings/providers/OpenAICompatible.tsx index ad338d342ab..b9f1b70b1cc 100644 --- a/webview-ui/src/components/settings/providers/OpenAICompatible.tsx +++ b/webview-ui/src/components/settings/providers/OpenAICompatible.tsx @@ -22,6 +22,7 @@ import { inputEventTransform, noTransform } from "../transforms" import { ModelPicker } from "../ModelPicker" import { R1FormatSetting } from "../R1FormatSetting" import { ThinkingBudget } from "../ThinkingBudget" +import { vscode } from "../../../utils/vscode" type OpenAICompatibleProps = { apiConfiguration: ProviderSettings @@ -107,20 +108,72 @@ export const OpenAICompatible = ({ [setApiConfigurationField], ) - const onMessage = useCallback((event: MessageEvent) => { - const message: ExtensionMessage = event.data - - switch (message.type) { - case "openAiModels": { - const updatedModels = message.openAiModels ?? [] - setOpenAiModels(Object.fromEntries(updatedModels.map((item) => [item, openAiModelInfoSaneDefaults]))) - break + const onMessage = useCallback( + (event: MessageEvent) => { + const message: ExtensionMessage = event.data + + switch (message.type) { + case "openAiModels": { + const updatedModels = message.openAiModels ?? 
[] + setOpenAiModels( + Object.fromEntries(updatedModels.map((item) => [item, openAiModelInfoSaneDefaults])), + ) + break + } + case "openAiModelInfo": { + if (message.openAiModelInfo) { + setApiConfigurationField("openAiCustomModelInfo", { + ...(apiConfiguration.openAiCustomModelInfo || openAiModelInfoSaneDefaults), + ...message.openAiModelInfo, + }) + } + break + } } - } - }, []) + }, + [apiConfiguration, setApiConfigurationField], + ) useEvent("message", onMessage) + // Auto-list models when Base URL or API Key changes + // Auto-list models when Base URL or API Key changes + useEffect(() => { + const timer = setTimeout(() => { + if (apiConfiguration?.openAiBaseUrl) { + vscode.postMessage({ + type: "requestOpenAiModels", + values: { + baseUrl: apiConfiguration.openAiBaseUrl, + apiKey: apiConfiguration.openAiApiKey, + openAiHeaders: apiConfiguration.openAiHeaders, + }, + }) + } + }, 500) // Debounce 500ms + + return () => clearTimeout(timer) + }, [apiConfiguration?.openAiBaseUrl, apiConfiguration?.openAiApiKey, apiConfiguration?.openAiHeaders]) + + // Auto-fill when model ID changes + useEffect(() => { + if (apiConfiguration?.openAiModelId) { + vscode.postMessage({ + type: "requestOpenAiModelInfo", + values: { openAiModelId: apiConfiguration.openAiModelId }, + }) + } + }, [apiConfiguration?.openAiModelId]) + + const handleAutoFill = useCallback(() => { + if (apiConfiguration?.openAiModelId) { + vscode.postMessage({ + type: "requestOpenAiModelInfo", + values: { openAiModelId: apiConfiguration.openAiModelId, forceRefresh: true }, + }) + } + }, [apiConfiguration?.openAiModelId]) + return ( <>
-
- {t("settings:providers.customModel.capabilities")} +
+
+ {t("settings:providers.customModel.capabilities")} +
+
@@ -367,22 +429,78 @@ export const OpenAICompatible = ({
-
+ {/* Model Capabilities - Compact Grid */} +
+
+ ({ + ...(apiConfiguration?.openAiCustomModelInfo || openAiModelInfoSaneDefaults), + supportsReasoningBinary: checked, + }))}> + {t("settings:providers.customModel.supportsReasoning.label")} + + + + +
+
+ ({ + ...(apiConfiguration?.openAiCustomModelInfo || openAiModelInfoSaneDefaults), + supportsNativeTools: checked, + }))}> + {t("settings:providers.customModel.supportsNativeTools.label")} + + + + +
+
+ ({ + ...(apiConfiguration?.openAiCustomModelInfo || openAiModelInfoSaneDefaults), + supportsComputerUse: checked, + }))}> + {t("settings:providers.customModel.computerUse.label")} + + + + +
{ - return { - ...(apiConfiguration?.openAiCustomModelInfo || openAiModelInfoSaneDefaults), - supportsImages: checked, - } - })}> - - {t("settings:providers.customModel.imageSupport.label")} - + onChange={handleInputChange("openAiCustomModelInfo", (checked) => ({ + ...(apiConfiguration?.openAiCustomModelInfo || openAiModelInfoSaneDefaults), + supportsImages: checked, + }))}> + {t("settings:providers.customModel.imageSupport.label")}
-
- {t("settings:providers.customModel.imageSupport.description")} -
-
- -
{ - return { - ...(apiConfiguration?.openAiCustomModelInfo || openAiModelInfoSaneDefaults), - supportsPromptCache: checked, - } - })}> - {t("settings:providers.customModel.promptCache.label")} + onChange={handleInputChange("openAiCustomModelInfo", (checked) => ({ + ...(apiConfiguration?.openAiCustomModelInfo || openAiModelInfoSaneDefaults), + supportsPromptCache: checked, + }))}> + {t("settings:providers.customModel.promptCache.label")}
-
- {t("settings:providers.customModel.promptCache.description")} -
diff --git a/webview-ui/src/i18n/locales/ar/settings.json b/webview-ui/src/i18n/locales/ar/settings.json index 878ab64c632..7777d3c403f 100644 --- a/webview-ui/src/i18n/locales/ar/settings.json +++ b/webview-ui/src/i18n/locales/ar/settings.json @@ -6,7 +6,8 @@ "reset": "إعادة ضبط", "select": "اختيار", "add": "إضافة رأس", - "remove": "إزالة" + "remove": "إزالة", + "autoFill": "Auto-fill" }, "header": { "title": "الإعدادات", @@ -592,7 +593,19 @@ "description": "تكلفة كتابة للكاش أول مرة." } }, - "resetDefaults": "رجع للإعدادات الافتراضية" + "resetDefaults": "رجع للإعدادات الافتراضية", + "supportsReasoning": { + "label": "Supports Reasoning (e.g. o1/DeepSeek)", + "description": "Enable this if your model supports reasoning capabilities (e.g. 'think' tags)." + }, + "supportsNativeTools": { + "label": "Supports Function Calling", + "description": "Enable this if your model supports native function calling (tools)." + }, + "supportsComputerUse": { + "label": "Supports Computer Use", + "description": "Enable this if your model supports computer use/browser interaction." + } }, "rateLimitAfter": { "label": "تحديد الحدّ الأقصى عند النهاية", diff --git a/webview-ui/src/i18n/locales/ca/settings.json b/webview-ui/src/i18n/locales/ca/settings.json index 9e3484e4e76..e031c2571c6 100644 --- a/webview-ui/src/i18n/locales/ca/settings.json +++ b/webview-ui/src/i18n/locales/ca/settings.json @@ -6,7 +6,8 @@ "reset": "Restablir", "select": "Seleccionar", "add": "Afegir capçalera", - "remove": "Eliminar" + "remove": "Eliminar", + "autoFill": "Auto-fill" }, "header": { "title": "Configuració", @@ -552,7 +553,19 @@ "description": "Cost per milió de tokens per escriure a la caché. Aquest és el preu cobrat quan s'emmagatzema un prompt per primera vegada." } }, - "resetDefaults": "Restablir als valors per defecte" + "resetDefaults": "Restablir als valors per defecte", + "supportsReasoning": { + "label": "Supports Reasoning (e.g. 
o1/DeepSeek)", + "description": "Enable this if your model supports reasoning capabilities (e.g. 'think' tags)." + }, + "supportsNativeTools": { + "label": "Supports Function Calling", + "description": "Enable this if your model supports native function calling (tools)." + }, + "supportsComputerUse": { + "label": "Supports Computer Use", + "description": "Enable this if your model supports computer use/browser interaction." + } }, "rateLimitAfter": { "label": "Límit de taxa al final", @@ -573,10 +586,9 @@ "none": "Cap", "minimal": "Mínim (el més ràpid)", "high": "Alt", - "xhigh": "Molt alt", + "xhigh": "Molt Alt", "medium": "Mitjà", - "low": "Baix", - "xhigh": "Molt Alt" + "low": "Baix" }, "verbosity": { "label": "Verbositat de la sortida", diff --git a/webview-ui/src/i18n/locales/cs/settings.json b/webview-ui/src/i18n/locales/cs/settings.json index 681690e1a15..d3d8c4f1885 100644 --- a/webview-ui/src/i18n/locales/cs/settings.json +++ b/webview-ui/src/i18n/locales/cs/settings.json @@ -6,7 +6,8 @@ "reset": "Resetovat", "select": "Vybrat", "add": "Přidat záhlaví", - "remove": "Odstranit" + "remove": "Odstranit", + "autoFill": "Auto-fill" }, "header": { "title": "Nastavení", @@ -583,7 +584,19 @@ "description": "Cena za milion tokenů za zápis do mezipaměti. Toto je cena účtovaná při prvním uložení výzvy do mezipaměti." } }, - "resetDefaults": "Obnovit výchozí" + "resetDefaults": "Obnovit výchozí", + "supportsReasoning": { + "label": "Supports Reasoning (e.g. o1/DeepSeek)", + "description": "Enable this if your model supports reasoning capabilities (e.g. 'think' tags)." + }, + "supportsNativeTools": { + "label": "Supports Function Calling", + "description": "Enable this if your model supports native function calling (tools)." + }, + "supportsComputerUse": { + "label": "Supports Computer Use", + "description": "Enable this if your model supports computer use/browser interaction." 
+ } }, "rateLimitAfter": { "label": "Omezení rychlosti na konci", diff --git a/webview-ui/src/i18n/locales/de/settings.json b/webview-ui/src/i18n/locales/de/settings.json index 43a3a59ca9f..e0728e6b8d3 100644 --- a/webview-ui/src/i18n/locales/de/settings.json +++ b/webview-ui/src/i18n/locales/de/settings.json @@ -6,7 +6,8 @@ "reset": "Zurücksetzen", "select": "Auswählen", "add": "Header hinzufügen", - "remove": "Entfernen" + "remove": "Entfernen", + "autoFill": "Auto-fill" }, "header": { "title": "Einstellungen", @@ -561,7 +562,19 @@ "description": "Kosten pro Million Tokens für das Schreiben in den Cache. Dies ist der Preis, der beim ersten Cachen eines Prompts berechnet wird." } }, - "resetDefaults": "Auf Standardwerte zurücksetzen" + "resetDefaults": "Auf Standardwerte zurücksetzen", + "supportsReasoning": { + "label": "Supports Reasoning (e.g. o1/DeepSeek)", + "description": "Enable this if your model supports reasoning capabilities (e.g. 'think' tags)." + }, + "supportsNativeTools": { + "label": "Supports Function Calling", + "description": "Enable this if your model supports native function calling (tools)." + }, + "supportsComputerUse": { + "label": "Supports Computer Use", + "description": "Enable this if your model supports computer use/browser interaction." 
+ } }, "rateLimitAfter": { "label": "Ratenbegrenzung am Ende", @@ -584,8 +597,7 @@ "high": "Hoch", "xhigh": "Sehr hoch", "medium": "Mittel", - "low": "Niedrig", - "xhigh": "Sehr hoch" + "low": "Niedrig" }, "verbosity": { "label": "Ausgabe-Ausführlichkeit", diff --git a/webview-ui/src/i18n/locales/en/settings.json b/webview-ui/src/i18n/locales/en/settings.json index 86922fa1b08..a8ed0b22b16 100644 --- a/webview-ui/src/i18n/locales/en/settings.json +++ b/webview-ui/src/i18n/locales/en/settings.json @@ -6,7 +6,8 @@ "reset": "Reset", "select": "Select", "add": "Add Header", - "remove": "Remove" + "remove": "Remove", + "autoFill": "Auto-fill" }, "header": { "title": "Settings", @@ -381,9 +382,6 @@ "getZaiApiKey": "Get Z AI API Key", "zaiEntrypoint": "Z AI Entrypoint", "zaiEntrypointDescription": "Please select the appropriate API entrypoint based on your location. If you are in China, choose open.bigmodel.cn. Otherwise, choose api.z.ai.", - "minimaxApiKey": "MiniMax API Key", - "getMiniMaxApiKey": "Get MiniMax API Key", - "minimaxBaseUrl": "MiniMax Entrypoint", "geminiApiKey": "Gemini API Key", "getGroqApiKey": "Get Groq API Key", "groqApiKey": "Groq API Key", @@ -539,6 +537,18 @@ "label": "Max Output Tokens", "description": "Maximum number of tokens the model can generate in a response. (Specify -1 to allow the server to set the max tokens.)" }, + "supportsReasoning": { + "label": "Supports Reasoning (e.g. o1/DeepSeek)", + "description": "Enable this if your model supports reasoning capabilities (e.g. 'think' tags)." + }, + "supportsNativeTools": { + "label": "Supports Function Calling", + "description": "Enable this if your model supports native function calling (tools)." + }, + "supportsComputerUse": { + "label": "Supports Computer Use", + "description": "Enable this if your model supports computer use/browser interaction." + }, "contextWindow": { "label": "Context Window Size", "description": "Total tokens (input + output) the model can process." 
diff --git a/webview-ui/src/i18n/locales/es/settings.json b/webview-ui/src/i18n/locales/es/settings.json index 18d7766d3ef..42954e7775b 100644 --- a/webview-ui/src/i18n/locales/es/settings.json +++ b/webview-ui/src/i18n/locales/es/settings.json @@ -6,7 +6,8 @@ "reset": "Restablecer", "select": "Seleccionar", "add": "Añadir encabezado", - "remove": "Eliminar" + "remove": "Eliminar", + "autoFill": "Auto-fill" }, "header": { "title": "Configuración", @@ -535,7 +536,19 @@ "description": "Costo por millón de tokens para escribir en el caché. Este es el precio que se cobra cuando se almacena un prompt en caché por primera vez." } }, - "resetDefaults": "Restablecer valores predeterminados" + "resetDefaults": "Restablecer valores predeterminados", + "supportsReasoning": { + "label": "Supports Reasoning (e.g. o1/DeepSeek)", + "description": "Enable this if your model supports reasoning capabilities (e.g. 'think' tags)." + }, + "supportsNativeTools": { + "label": "Supports Function Calling", + "description": "Enable this if your model supports native function calling (tools)." + }, + "supportsComputerUse": { + "label": "Supports Computer Use", + "description": "Enable this if your model supports computer use/browser interaction." 
+ } }, "rateLimitAfter": { "label": "Límite de tasa al final", @@ -556,10 +569,9 @@ "none": "Ninguno", "minimal": "Mínimo (el más rápido)", "high": "Alto", - "xhigh": "Muy alto", + "xhigh": "Extra Alto", "medium": "Medio", - "low": "Bajo", - "xhigh": "Extra Alto" + "low": "Bajo" }, "verbosity": { "label": "Verbosidad de la salida", diff --git a/webview-ui/src/i18n/locales/fr/settings.json b/webview-ui/src/i18n/locales/fr/settings.json index 9bafa973f6a..5e116fa2287 100644 --- a/webview-ui/src/i18n/locales/fr/settings.json +++ b/webview-ui/src/i18n/locales/fr/settings.json @@ -6,7 +6,8 @@ "reset": "Réinitialiser", "select": "Sélectionner", "add": "Ajouter un en-tête", - "remove": "Supprimer" + "remove": "Supprimer", + "autoFill": "Auto-fill" }, "header": { "title": "Paramètres", @@ -535,7 +536,19 @@ "description": "Coût par million de tokens pour l'écriture dans le cache. C'est le prix facturé lors de la première mise en cache d'un prompt." } }, - "resetDefaults": "Réinitialiser les valeurs par défaut" + "resetDefaults": "Réinitialiser les valeurs par défaut", + "supportsReasoning": { + "label": "Supports Reasoning (e.g. o1/DeepSeek)", + "description": "Enable this if your model supports reasoning capabilities (e.g. 'think' tags)." + }, + "supportsNativeTools": { + "label": "Supports Function Calling", + "description": "Enable this if your model supports native function calling (tools)." + }, + "supportsComputerUse": { + "label": "Supports Computer Use", + "description": "Enable this if your model supports computer use/browser interaction." 
+ } }, "rateLimitAfter": { "label": "Limite de débit en fin", @@ -558,8 +571,7 @@ "high": "Élevé", "xhigh": "Très élevé", "medium": "Moyen", - "low": "Faible", - "xhigh": "Très élevé" + "low": "Faible" }, "verbosity": { "label": "Verbosité de la sortie", diff --git a/webview-ui/src/i18n/locales/hi/settings.json b/webview-ui/src/i18n/locales/hi/settings.json index e89acea7084..5dc4c7e58a9 100644 --- a/webview-ui/src/i18n/locales/hi/settings.json +++ b/webview-ui/src/i18n/locales/hi/settings.json @@ -6,7 +6,8 @@ "reset": "रीसेट करें", "select": "चुनें", "add": "हेडर जोड़ें", - "remove": "हटाएं" + "remove": "हटाएं", + "autoFill": "Auto-fill" }, "header": { "title": "सेटिंग्स", @@ -552,7 +553,19 @@ "description": "कैश में लिखने के लिए प्रति मिलियन टोकन की लागत। यह वह मूल्य है जो पहली बार प्रॉम्प्ट को कैश करने पर लगाया जाता है।" } }, - "resetDefaults": "डिफ़ॉल्ट पर रीसेट करें" + "resetDefaults": "डिफ़ॉल्ट पर रीसेट करें", + "supportsReasoning": { + "label": "Supports Reasoning (e.g. o1/DeepSeek)", + "description": "Enable this if your model supports reasoning capabilities (e.g. 'think' tags)." + }, + "supportsNativeTools": { + "label": "Supports Function Calling", + "description": "Enable this if your model supports native function calling (tools)." + }, + "supportsComputerUse": { + "label": "Supports Computer Use", + "description": "Enable this if your model supports computer use/browser interaction." 
+ } }, "rateLimitAfter": { "label": "अंत में दर सीमा", @@ -573,10 +586,9 @@ "none": "कोई नहीं", "minimal": "न्यूनतम (सबसे तेज़)", "high": "उच्च", - "xhigh": "बहुत उच्च", + "xhigh": "अत्यंत उच्च", "medium": "मध्यम", - "low": "निम्न", - "xhigh": "अत्यंत उच्च" + "low": "निम्न" }, "verbosity": { "label": "आउटपुट वर्बोसिटी", diff --git a/webview-ui/src/i18n/locales/id/settings.json b/webview-ui/src/i18n/locales/id/settings.json index faa10d688f2..56de72cc7b4 100644 --- a/webview-ui/src/i18n/locales/id/settings.json +++ b/webview-ui/src/i18n/locales/id/settings.json @@ -6,7 +6,8 @@ "reset": "Reset", "select": "Pilih", "add": "Tambah Header", - "remove": "Hapus" + "remove": "Hapus", + "autoFill": "Auto-fill" }, "header": { "title": "Pengaturan", @@ -552,7 +553,19 @@ "description": "Biaya per juta token untuk menulis ke cache. Ini adalah harga yang dikenakan ketika prompt di-cache untuk pertama kalinya." } }, - "resetDefaults": "Reset ke Default" + "resetDefaults": "Reset ke Default", + "supportsReasoning": { + "label": "Supports Reasoning (e.g. o1/DeepSeek)", + "description": "Enable this if your model supports reasoning capabilities (e.g. 'think' tags)." + }, + "supportsNativeTools": { + "label": "Supports Function Calling", + "description": "Enable this if your model supports native function calling (tools)." + }, + "supportsComputerUse": { + "label": "Supports Computer Use", + "description": "Enable this if your model supports computer use/browser interaction." 
+ } }, "rateLimitAfter": { "label": "Batas laju di akhir", @@ -573,10 +586,9 @@ "none": "Tidak Ada", "minimal": "Minimal (Tercepat)", "high": "Tinggi", - "xhigh": "Sangat tinggi", + "xhigh": "Ekstra Tinggi", "medium": "Sedang", - "low": "Rendah", - "xhigh": "Ekstra Tinggi" + "low": "Rendah" }, "verbosity": { "label": "Verbositas Output", diff --git a/webview-ui/src/i18n/locales/it/settings.json b/webview-ui/src/i18n/locales/it/settings.json index f91935e7aab..ed8d033e839 100644 --- a/webview-ui/src/i18n/locales/it/settings.json +++ b/webview-ui/src/i18n/locales/it/settings.json @@ -6,7 +6,8 @@ "reset": "Ripristina", "select": "Seleziona", "add": "Aggiungi intestazione", - "remove": "Rimuovi" + "remove": "Rimuovi", + "autoFill": "Auto-fill" }, "header": { "title": "Impostazioni", @@ -562,7 +563,19 @@ "description": "Costo per milione di token per scrivere nella cache. Questo prezzo viene applicato quando si memorizza un prompt nella cache per la prima volta." } }, - "resetDefaults": "Ripristina valori predefiniti" + "resetDefaults": "Ripristina valori predefiniti", + "supportsReasoning": { + "label": "Supports Reasoning (e.g. o1/DeepSeek)", + "description": "Enable this if your model supports reasoning capabilities (e.g. 'think' tags)." + }, + "supportsNativeTools": { + "label": "Supports Function Calling", + "description": "Enable this if your model supports native function calling (tools)." + }, + "supportsComputerUse": { + "label": "Supports Computer Use", + "description": "Enable this if your model supports computer use/browser interaction." 
+ } }, "rateLimitAfter": { "label": "Limite di velocità alla fine", @@ -583,10 +596,9 @@ "none": "Nessuno", "minimal": "Minimo (più veloce)", "high": "Alto", - "xhigh": "Molto alto", + "xhigh": "Extra Alto", "medium": "Medio", - "low": "Basso", - "xhigh": "Extra Alto" + "low": "Basso" }, "verbosity": { "label": "Verbosity dell'output", diff --git a/webview-ui/src/i18n/locales/ja/settings.json b/webview-ui/src/i18n/locales/ja/settings.json index 360d47cedb8..04d7a46408e 100644 --- a/webview-ui/src/i18n/locales/ja/settings.json +++ b/webview-ui/src/i18n/locales/ja/settings.json @@ -6,7 +6,8 @@ "reset": "リセット", "select": "選択", "add": "ヘッダーを追加", - "remove": "削除" + "remove": "削除", + "autoFill": "Auto-fill" }, "header": { "title": "設定", @@ -553,7 +554,19 @@ "description": "キャッシュへの書き込みの100万トークンあたりのコスト。これはプロンプトが初めてキャッシュされる際に課金される価格です。" } }, - "resetDefaults": "デフォルトにリセット" + "resetDefaults": "デフォルトにリセット", + "supportsReasoning": { + "label": "Supports Reasoning (e.g. o1/DeepSeek)", + "description": "Enable this if your model supports reasoning capabilities (e.g. 'think' tags)." + }, + "supportsNativeTools": { + "label": "Supports Function Calling", + "description": "Enable this if your model supports native function calling (tools)." + }, + "supportsComputerUse": { + "label": "Supports Computer Use", + "description": "Enable this if your model supports computer use/browser interaction." 
+ } }, "rateLimitAfter": { "label": "終了時のレート制限", @@ -574,10 +587,9 @@ "none": "なし", "minimal": "最小 (最速)", "high": "高", - "xhigh": "非常に高い", + "xhigh": "極高", "medium": "中", - "low": "低", - "xhigh": "極高" + "low": "低" }, "verbosity": { "label": "出力の冗長性", diff --git a/webview-ui/src/i18n/locales/ko/settings.json b/webview-ui/src/i18n/locales/ko/settings.json index 056852c59c0..332a5b2d5b2 100644 --- a/webview-ui/src/i18n/locales/ko/settings.json +++ b/webview-ui/src/i18n/locales/ko/settings.json @@ -6,7 +6,8 @@ "reset": "초기화", "select": "선택", "add": "헤더 추가", - "remove": "삭제" + "remove": "삭제", + "autoFill": "Auto-fill" }, "header": { "title": "설정", @@ -552,7 +553,19 @@ "description": "캐시에 쓰기의 백만 토큰당 비용입니다. 이는 프롬프트가 처음 캐시될 때 청구되는 가격입니다." } }, - "resetDefaults": "기본값으로 재설정" + "resetDefaults": "기본값으로 재설정", + "supportsReasoning": { + "label": "Supports Reasoning (e.g. o1/DeepSeek)", + "description": "Enable this if your model supports reasoning capabilities (e.g. 'think' tags)." + }, + "supportsNativeTools": { + "label": "Supports Function Calling", + "description": "Enable this if your model supports native function calling (tools)." + }, + "supportsComputerUse": { + "label": "Supports Computer Use", + "description": "Enable this if your model supports computer use/browser interaction." 
+ } }, "rateLimitAfter": { "label": "종료 시 속도 제한", @@ -575,8 +588,7 @@ "high": "높음", "xhigh": "매우 높음", "medium": "중간", - "low": "낮음", - "xhigh": "매우 높음" + "low": "낮음" }, "verbosity": { "label": "출력 상세도", diff --git a/webview-ui/src/i18n/locales/nl/settings.json b/webview-ui/src/i18n/locales/nl/settings.json index 0f20c587ced..0a0004d60c8 100644 --- a/webview-ui/src/i18n/locales/nl/settings.json +++ b/webview-ui/src/i18n/locales/nl/settings.json @@ -6,7 +6,8 @@ "reset": "Resetten", "select": "Selecteren", "add": "Header toevoegen", - "remove": "Verwijderen" + "remove": "Verwijderen", + "autoFill": "Auto-fill" }, "header": { "title": "Instellingen", @@ -552,7 +553,19 @@ "description": "Kosten per miljoen tokens voor het schrijven naar de cache. Dit is de prijs die wordt gerekend wanneer een prompt voor het eerst wordt gecachet." } }, - "resetDefaults": "Standaardwaarden herstellen" + "resetDefaults": "Standaardwaarden herstellen", + "supportsReasoning": { + "label": "Supports Reasoning (e.g. o1/DeepSeek)", + "description": "Enable this if your model supports reasoning capabilities (e.g. 'think' tags)." + }, + "supportsNativeTools": { + "label": "Supports Function Calling", + "description": "Enable this if your model supports native function calling (tools)." + }, + "supportsComputerUse": { + "label": "Supports Computer Use", + "description": "Enable this if your model supports computer use/browser interaction." 
+ } }, "rateLimitAfter": { "label": "Snelheidslimiet aan het eind", @@ -573,10 +586,9 @@ "none": "Geen", "minimal": "Minimaal (Snelst)", "high": "Hoog", - "xhigh": "Zeer hoog", + "xhigh": "Extra Hoog", "medium": "Middel", - "low": "Laag", - "xhigh": "Extra Hoog" + "low": "Laag" }, "verbosity": { "label": "Uitvoerbaarheid", diff --git a/webview-ui/src/i18n/locales/pl/settings.json b/webview-ui/src/i18n/locales/pl/settings.json index 175526d483f..0dfe07a0210 100644 --- a/webview-ui/src/i18n/locales/pl/settings.json +++ b/webview-ui/src/i18n/locales/pl/settings.json @@ -6,7 +6,8 @@ "reset": "Resetuj", "select": "Wybierz", "add": "Dodaj nagłówek", - "remove": "Usuń" + "remove": "Usuń", + "autoFill": "Auto-fill" }, "header": { "title": "Ustawienia", @@ -552,7 +553,19 @@ "description": "Koszt za milion tokenów za zapis do bufora. Ta cena jest naliczana przy pierwszym buforowaniu podpowiedzi." } }, - "resetDefaults": "Przywróć domyślne" + "resetDefaults": "Przywróć domyślne", + "supportsReasoning": { + "label": "Supports Reasoning (e.g. o1/DeepSeek)", + "description": "Enable this if your model supports reasoning capabilities (e.g. 'think' tags)." + }, + "supportsNativeTools": { + "label": "Supports Function Calling", + "description": "Enable this if your model supports native function calling (tools)." + }, + "supportsComputerUse": { + "label": "Supports Computer Use", + "description": "Enable this if your model supports computer use/browser interaction." 
+ } }, "rateLimitAfter": { "label": "Limit prędkości na końcu", @@ -575,8 +588,7 @@ "high": "Wysoki", "xhigh": "Bardzo wysoki", "medium": "Średni", - "low": "Niski", - "xhigh": "Bardzo wysoki" + "low": "Niski" }, "verbosity": { "label": "Szczegółowość danych wyjściowych", diff --git a/webview-ui/src/i18n/locales/pt-BR/settings.json b/webview-ui/src/i18n/locales/pt-BR/settings.json index 539640148a8..e80e56c6c3f 100644 --- a/webview-ui/src/i18n/locales/pt-BR/settings.json +++ b/webview-ui/src/i18n/locales/pt-BR/settings.json @@ -6,7 +6,8 @@ "reset": "Redefinir", "select": "Selecionar", "add": "Adicionar cabeçalho", - "remove": "Remover" + "remove": "Remover", + "autoFill": "Auto-fill" }, "header": { "title": "Configurações", @@ -526,7 +527,19 @@ "description": "Custo por milhão de tokens para escrita no cache. Este é o preço cobrado quando um prompt é armazenado em cache pela primeira vez." } }, - "resetDefaults": "Restaurar Padrões" + "resetDefaults": "Restaurar Padrões", + "supportsReasoning": { + "label": "Supports Reasoning (e.g. o1/DeepSeek)", + "description": "Enable this if your model supports reasoning capabilities (e.g. 'think' tags)." + }, + "supportsNativeTools": { + "label": "Supports Function Calling", + "description": "Enable this if your model supports native function calling (tools)." + }, + "supportsComputerUse": { + "label": "Supports Computer Use", + "description": "Enable this if your model supports computer use/browser interaction." 
+ } }, "rateLimitAfter": { "label": "Limite de taxa no fim", @@ -547,10 +560,9 @@ "none": "Nenhum", "minimal": "Mínimo (mais rápido)", "high": "Alto", - "xhigh": "Muito alto", + "xhigh": "Extra Alto", "medium": "Médio", - "low": "Baixo", - "xhigh": "Extra Alto" + "low": "Baixo" }, "verbosity": { "label": "Verbosidade da saída", diff --git a/webview-ui/src/i18n/locales/ru/settings.json b/webview-ui/src/i18n/locales/ru/settings.json index 541dbb889aa..4f06611f2c6 100644 --- a/webview-ui/src/i18n/locales/ru/settings.json +++ b/webview-ui/src/i18n/locales/ru/settings.json @@ -6,7 +6,8 @@ "reset": "Сбросить", "select": "Выбрать", "add": "Добавить заголовок", - "remove": "Удалить" + "remove": "Удалить", + "autoFill": "Auto-fill" }, "header": { "title": "Настройки", @@ -552,7 +553,19 @@ "description": "Стоимость за миллион токенов при записи в кэш. Взимается при первом кэшировании подсказки." } }, - "resetDefaults": "Сбросить к значениям по умолчанию" + "resetDefaults": "Сбросить к значениям по умолчанию", + "supportsReasoning": { + "label": "Supports Reasoning (e.g. o1/DeepSeek)", + "description": "Enable this if your model supports reasoning capabilities (e.g. 'think' tags)." + }, + "supportsNativeTools": { + "label": "Supports Function Calling", + "description": "Enable this if your model supports native function calling (tools)." + }, + "supportsComputerUse": { + "label": "Supports Computer Use", + "description": "Enable this if your model supports computer use/browser interaction." 
+ } }, "rateLimitAfter": { "label": "Ограничение скорости в конце", @@ -575,8 +588,7 @@ "high": "Высокие", "xhigh": "Очень высокие", "medium": "Средние", - "low": "Низкие", - "xhigh": "Очень высокие" + "low": "Низкие" }, "verbosity": { "label": "Подробность вывода", diff --git a/webview-ui/src/i18n/locales/th/settings.json b/webview-ui/src/i18n/locales/th/settings.json index 8d57cf6a49e..0cc4478a5bc 100644 --- a/webview-ui/src/i18n/locales/th/settings.json +++ b/webview-ui/src/i18n/locales/th/settings.json @@ -6,7 +6,8 @@ "reset": "รีเซ็ต", "select": "เลือก", "add": "เพิ่ม Header", - "remove": "ลบ" + "remove": "ลบ", + "autoFill": "Auto-fill" }, "header": { "title": "การตั้งค่า", @@ -551,7 +552,19 @@ "description": "ค่าใช้จ่ายต่อล้านโทเค็นสำหรับการเขียนไปยังแคช นี่คือราคาที่เรียกเก็บเมื่อมีการแคชพรอมต์เป็นครั้งแรก" } }, - "resetDefaults": "รีเซ็ตเป็นค่าเริ่มต้น" + "resetDefaults": "รีเซ็ตเป็นค่าเริ่มต้น", + "supportsReasoning": { + "label": "Supports Reasoning (e.g. o1/DeepSeek)", + "description": "Enable this if your model supports reasoning capabilities (e.g. 'think' tags)." + }, + "supportsNativeTools": { + "label": "Supports Function Calling", + "description": "Enable this if your model supports native function calling (tools)." + }, + "supportsComputerUse": { + "label": "Supports Computer Use", + "description": "Enable this if your model supports computer use/browser interaction." + } }, "rateLimitAfter": { "label": "จำกัดอัตราที่จุดสิ้นสุด", diff --git a/webview-ui/src/i18n/locales/tr/settings.json b/webview-ui/src/i18n/locales/tr/settings.json index 5fac19b4a80..fbd450a098d 100644 --- a/webview-ui/src/i18n/locales/tr/settings.json +++ b/webview-ui/src/i18n/locales/tr/settings.json @@ -6,7 +6,8 @@ "reset": "Sıfırla", "select": "Seç", "add": "Başlık Ekle", - "remove": "Kaldır" + "remove": "Kaldır", + "autoFill": "Auto-fill" }, "header": { "title": "Ayarlar", @@ -527,7 +528,19 @@ "description": "Önbelleğe yazma başına milyon token maliyeti. 
Bu, bir istem ilk kez önbelleğe alındığında uygulanan fiyattır." } }, - "resetDefaults": "Varsayılanlara Sıfırla" + "resetDefaults": "Varsayılanlara Sıfırla", + "supportsReasoning": { + "label": "Supports Reasoning (e.g. o1/DeepSeek)", + "description": "Enable this if your model supports reasoning capabilities (e.g. 'think' tags)." + }, + "supportsNativeTools": { + "label": "Supports Function Calling", + "description": "Enable this if your model supports native function calling (tools)." + }, + "supportsComputerUse": { + "label": "Supports Computer Use", + "description": "Enable this if your model supports computer use/browser interaction." + } }, "rateLimitAfter": { "label": "Bitişte oran sınırı", @@ -548,10 +561,9 @@ "none": "Yok", "minimal": "Minimal (en hızlı)", "high": "Yüksek", - "xhigh": "Çok yüksek", + "xhigh": "Ekstra Yüksek", "medium": "Orta", - "low": "Düşük", - "xhigh": "Ekstra Yüksek" + "low": "Düşük" }, "verbosity": { "label": "Çıktı Ayrıntı Düzeyi", diff --git a/webview-ui/src/i18n/locales/uk/settings.json b/webview-ui/src/i18n/locales/uk/settings.json index 83e9a866f24..6d3e8bc1a55 100644 --- a/webview-ui/src/i18n/locales/uk/settings.json +++ b/webview-ui/src/i18n/locales/uk/settings.json @@ -6,7 +6,8 @@ "reset": "Скинути", "select": "Вибрати", "add": "Додати заголовок", - "remove": "Видалити" + "remove": "Видалити", + "autoFill": "Auto-fill" }, "header": { "title": "Налаштування", @@ -591,7 +592,19 @@ "description": "Вартість за мільйон токенів для запису в кеш. Це ціна, що стягується, коли підказка кешується вперше." } }, - "resetDefaults": "Скинути до стандартних" + "resetDefaults": "Скинути до стандартних", + "supportsReasoning": { + "label": "Supports Reasoning (e.g. o1/DeepSeek)", + "description": "Enable this if your model supports reasoning capabilities (e.g. 'think' tags)." + }, + "supportsNativeTools": { + "label": "Supports Function Calling", + "description": "Enable this if your model supports native function calling (tools)." 
+ }, + "supportsComputerUse": { + "label": "Supports Computer Use", + "description": "Enable this if your model supports computer use/browser interaction." + } }, "rateLimitAfter": { "label": "Обмеження швидкості в кінці", diff --git a/webview-ui/src/i18n/locales/vi/settings.json b/webview-ui/src/i18n/locales/vi/settings.json index 24f762a714f..3f7efb0147d 100644 --- a/webview-ui/src/i18n/locales/vi/settings.json +++ b/webview-ui/src/i18n/locales/vi/settings.json @@ -6,7 +6,8 @@ "reset": "Đặt lại", "select": "Chọn", "add": "Thêm tiêu đề", - "remove": "Xóa" + "remove": "Xóa", + "autoFill": "Auto-fill" }, "header": { "title": "Cài đặt", @@ -552,7 +553,19 @@ "description": "Chi phí cho mỗi triệu token khi ghi vào bộ nhớ đệm. Đây là giá được tính khi một lời nhắc được lưu vào bộ nhớ đệm lần đầu tiên." } }, - "resetDefaults": "Đặt lại về mặc định" + "resetDefaults": "Đặt lại về mặc định", + "supportsReasoning": { + "label": "Supports Reasoning (e.g. o1/DeepSeek)", + "description": "Enable this if your model supports reasoning capabilities (e.g. 'think' tags)." + }, + "supportsNativeTools": { + "label": "Supports Function Calling", + "description": "Enable this if your model supports native function calling (tools)." + }, + "supportsComputerUse": { + "label": "Supports Computer Use", + "description": "Enable this if your model supports computer use/browser interaction." 
+ } }, "rateLimitAfter": { "label": "Giới hạn tỷ lệ ở cuối", @@ -575,8 +588,7 @@ "high": "Cao", "xhigh": "Rất cao", "medium": "Trung bình", - "low": "Thấp", - "xhigh": "Rất cao" + "low": "Thấp" }, "verbosity": { "label": "Mức độ chi tiết đầu ra", diff --git a/webview-ui/src/i18n/locales/zh-CN/settings.json b/webview-ui/src/i18n/locales/zh-CN/settings.json index 919383096e9..7825d670c44 100644 --- a/webview-ui/src/i18n/locales/zh-CN/settings.json +++ b/webview-ui/src/i18n/locales/zh-CN/settings.json @@ -6,7 +6,8 @@ "reset": "恢复默认设置", "select": "选择", "add": "添加标头", - "remove": "移除" + "remove": "移除", + "autoFill": "Auto-fill" }, "header": { "title": "设置", @@ -552,7 +553,19 @@ "description": "向缓存写入每百万Token的成本。这是首次缓存提示时收取的费用。" } }, - "resetDefaults": "重置为默认值" + "resetDefaults": "重置为默认值", + "supportsReasoning": { + "label": "Supports Reasoning (e.g. o1/DeepSeek)", + "description": "Enable this if your model supports reasoning capabilities (e.g. 'think' tags)." + }, + "supportsNativeTools": { + "label": "Supports Function Calling", + "description": "Enable this if your model supports native function calling (tools)." + }, + "supportsComputerUse": { + "label": "Supports Computer Use", + "description": "Enable this if your model supports computer use/browser interaction." 
+ } }, "rateLimitAfter": { "label": "在结束时速率限制", @@ -573,10 +586,9 @@ "none": "无", "minimal": "最小 (最快)", "high": "高", - "xhigh": "超高", + "xhigh": "极高", "medium": "中", - "low": "低", - "xhigh": "极高" + "low": "低" }, "verbosity": { "label": "输出详细程度", diff --git a/webview-ui/src/i18n/locales/zh-TW/settings.json b/webview-ui/src/i18n/locales/zh-TW/settings.json index 25a74df1127..71f06fad727 100644 --- a/webview-ui/src/i18n/locales/zh-TW/settings.json +++ b/webview-ui/src/i18n/locales/zh-TW/settings.json @@ -6,7 +6,8 @@ "reset": "重設", "select": "選擇", "add": "新增標頭", - "remove": "移除" + "remove": "移除", + "autoFill": "Auto-fill" }, "header": { "title": "設定", @@ -527,7 +528,19 @@ "description": "每百萬 Token 的快取寫入費用。當提示首次被儲存至快取時,會收取此費用。" } }, - "resetDefaults": "重設為預設值" + "resetDefaults": "重設為預設值", + "supportsReasoning": { + "label": "Supports Reasoning (e.g. o1/DeepSeek)", + "description": "Enable this if your model supports reasoning capabilities (e.g. 'think' tags)." + }, + "supportsNativeTools": { + "label": "Supports Function Calling", + "description": "Enable this if your model supports native function calling (tools)." + }, + "supportsComputerUse": { + "label": "Supports Computer Use", + "description": "Enable this if your model supports computer use/browser interaction." + } }, "rateLimitAfter": { "label": "在結束時速率限制", @@ -550,8 +563,7 @@ "high": "高", "xhigh": "超高", "medium": "中", - "low": "低", - "xhigh": "超高" + "low": "低" }, "verbosity": { "label": "輸出詳細程度",