diff --git a/src-tauri/Cargo.lock b/src-tauri/Cargo.lock
index 7486f8a74..fca28dada 100644
--- a/src-tauri/Cargo.lock
+++ b/src-tauri/Cargo.lock
@@ -18,6 +18,7 @@ dependencies = [
  "flume",
  "font-loader",
  "futures",
+ "futures-util",
  "glob",
  "hex",
  "image",
diff --git a/src-tauri/Cargo.toml b/src-tauri/Cargo.toml
index 2a18a889f..42ba32367 100644
--- a/src-tauri/Cargo.toml
+++ b/src-tauri/Cargo.toml
@@ -91,6 +91,7 @@ axum = "0.8.6"
 tower-http = { version = "0.6.6", features = ["cors"] }
 sha2 = "0.10.9"
 async-trait = "0.1.89"
+futures-util = "0.3.31"
 
 [target."cfg(windows)".dependencies]
 winreg = "0.55.0"
diff --git a/src-tauri/src/instance/models/misc.rs b/src-tauri/src/instance/models/misc.rs
index 2797d4ab5..1a4321334 100644
--- a/src-tauri/src/instance/models/misc.rs
+++ b/src-tauri/src/instance/models/misc.rs
@@ -78,7 +78,7 @@ pub enum ModLoaderStatus {
 }
 
 structstruck::strike! {
-  #[strikethrough[derive(Debug, PartialEq, Eq, Clone, Deserialize, Serialize, Default)]]
+  #[strikethrough[derive(Debug, PartialEq, Clone, Deserialize, Serialize, Default)]]
   #[strikethrough[serde(rename_all = "camelCase", deny_unknown_fields, default)]]
   pub struct Instance {
     pub id: String,
diff --git a/src-tauri/src/intelligence/commands.rs b/src-tauri/src/intelligence/commands.rs
new file mode 100644
index 000000000..8b53894b1
--- /dev/null
+++ b/src-tauri/src/intelligence/commands.rs
@@ -0,0 +1,100 @@
+use std::sync::Mutex;
+
+use tauri::{AppHandle, Manager};
+use tauri_plugin_http::reqwest;
+
+use crate::error::SJMCLResult;
+use crate::intelligence::models::{
+  ChatCompletionRequest, ChatCompletionResponse, ChatMessage, ChatModelsResponse, LLMServiceError,
+};
+use crate::launcher_config::models::LauncherConfig;
+
+// TODO: make chat completion a reusable helper function
+// TODO: migrate log analysis logic to backend (with multi-language prompt and result parsing)
+
+#[tauri::command]
+pub async fn check_llm_service_availability(
+  app: AppHandle,
+  base_url: String,
+  api_key: String,
+  model: String,
+) -> SJMCLResult<()> {
+  let client = app.state::<reqwest::Client>();
+  let response = client
+    .get(format!("{}/v1/models", base_url))
+    .bearer_auth(api_key)
+    .send()
+    .await
+    .map_err(|e| {
+      log::error!("Error connecting to LLM service: {}", e);
+      LLMServiceError::NetworkError
+    })?;
+
+  if response.status().is_success() {
+    let models_response = response.json::<ChatModelsResponse>().await.map_err(|e| {
+      log::error!("Error parsing LLM service response: {}", e);
+      LLMServiceError::ApiParseError
+    })?;
+    if models_response.data.iter().any(|m| m.id == model) {
+      Ok(())
+    } else {
+      Err(LLMServiceError::NoSuchModel.into())
+    }
+  } else {
+    Err(LLMServiceError::InvalidAPIKey.into())
+  }
+}
+
+#[tauri::command]
+pub async fn fetch_llm_chat_response(
+  app: AppHandle,
+  messages: Vec<ChatMessage>,
+) -> SJMCLResult<String> {
+  let client = reqwest::Client::new(); // use a separate client instance without a timeout
+
+  let (enabled, model_config) = {
+    let config_binding = app.state::<Mutex<LauncherConfig>>();
+    let config_state = config_binding.lock()?;
+    (
+      config_state.intelligence.enabled,
+      config_state.intelligence.model.clone(),
+    )
+  };
+
+  if !enabled {
+    return Err(LLMServiceError::NotEnabled.into());
+  }
+
+  let response = client
+    .post(format!("{}/v1/chat/completions", model_config.base_url))
+    .bearer_auth(&model_config.api_key)
+    .json(&ChatCompletionRequest {
+      model: model_config.model.clone(),
+      messages,
+      stream: false,
+    })
+    .send()
+    .await
+    .map_err(|e| {
+      log::error!("Error connecting to AI service: {}", e);
+      LLMServiceError::NetworkError
+    })?;
+
+  if response.status().is_success() {
+    let completion_response = response
+      .json::<ChatCompletionResponse>()
+      .await
+      .map_err(|e| {
+        log::error!("Error parsing AI service response: {}", e);
+        LLMServiceError::ApiParseError
+      })?;
+    if let Some(choice) = completion_response.choices.first() {
+      Ok(choice.message.content.clone())
+    } else {
+      Err(LLMServiceError::NoResponse.into())
+    }
+  } else {
+    log::error!("AI service returned error status: {}", response.status());
+    Err(LLMServiceError::NetworkError.into())
+  }
+}
diff --git a/src-tauri/src/intelligence/mod.rs b/src-tauri/src/intelligence/mod.rs
new file mode 100644
index 000000000..0c7f3b0fd
--- /dev/null
+++ b/src-tauri/src/intelligence/mod.rs
@@ -0,0 +1,2 @@
+pub mod commands;
+pub mod models;
diff --git a/src-tauri/src/intelligence/models.rs b/src-tauri/src/intelligence/models.rs
new file mode 100644
index 000000000..2c9783223
--- /dev/null
+++ b/src-tauri/src/intelligence/models.rs
@@ -0,0 +1,53 @@
+use serde::{Deserialize, Serialize};
+use strum_macros::Display;
+
+structstruck::strike! {
+  #[strikethrough[derive(Serialize, Deserialize)]]
+  pub struct ChatModelsResponse {
+    pub data: Vec<struct ChatModel {
+      pub id: String,
+    }>,
+  }
+}
+
+structstruck::strike! {
+  #[strikethrough[derive(Serialize, Deserialize)]]
+  pub struct ChatCompletionRequest {
+    pub model: String,
+    pub messages: Vec<struct ChatMessage {
+      pub role: String,
+      pub content: String,
+    }>,
+    pub stream: bool,
+  }
+}
+
+structstruck::strike! {
+  #[strikethrough[derive(Serialize, Deserialize)]]
+  pub struct ChatCompletionResponse {
+    pub id: String,
+    pub object: String,
+    pub created: u64,
+    pub choices: Vec<struct ChatChoice {
+      pub message: ChatMessage,
+    }>,
+  }
+}
+
+#[derive(Debug, Display)]
+#[strum(serialize_all = "SCREAMING_SNAKE_CASE")]
+pub enum LLMServiceError {
+  ApiParseError,
+  InvalidAPIKey,
+  NetworkError,
+  NotEnabled,
+  NoSuchModel,
+  NoResponse,
+}
+
+impl std::error::Error for LLMServiceError {}
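A note on the wire format: both commands speak the OpenAI-compatible REST surface, `GET {base_url}/v1/models` for the availability probe and `POST {base_url}/v1/chat/completions` for the actual completion. The sketch below restates, in TypeScript, only the fields the Rust structs above actually read; anything beyond those is an assumption about the provider, not something this diff depends on.

```ts
// Sketch of the payloads assumed by commands.rs / models.rs above.
// Only the fields the backend reads are modeled; providers may send more.
interface ModelsResponse {
  data: { id: string }[]; // matched against the configured model name
}

interface ChatCompletionRequest {
  model: string;
  messages: { role: "system" | "user" | "assistant"; content: string }[];
  stream: false; // fetch_llm_chat_response never streams
}

interface ChatCompletionResponse {
  id: string;
  object: string;
  created: number;
  choices: { message: { role: string; content: string } }[]; // first choice is returned
}

// Example request body, e.g. for a baseUrl of "https://api.openai.com":
const exampleRequest: ChatCompletionRequest = {
  model: "gpt-3.5-turbo",
  messages: [{ role: "user", content: "Why did my game crash?" }],
  stream: false,
};
```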
diff --git a/src-tauri/src/launcher_config/models.rs b/src-tauri/src/launcher_config/models.rs
index 061208bf1..a3a1bbee3 100644
--- a/src-tauri/src/launcher_config/models.rs
+++ b/src-tauri/src/launcher_config/models.rs
@@ -79,7 +79,7 @@ pub enum LauncherVisiablity {
 // assert!(config.access("114514").is_err())
 //
 structstruck::strike! {
-  #[strikethrough[derive(Partial, Debug, PartialEq, Eq, Clone, Deserialize, Serialize)]]
+  #[strikethrough[derive(Partial, Debug, PartialEq, Clone, Deserialize, Serialize)]]
   #[strikethrough[serde(rename_all = "camelCase", deny_unknown_fields)]]
   #[strikethrough[derive(SmartDefault)]]
   #[strikethrough[serde(default)]]
@@ -163,7 +163,7 @@ pub enum ProxyType {
 }
 
 structstruck::strike! {
-  #[strikethrough[derive(Partial, Debug, PartialEq, Eq, Clone, Deserialize, Serialize)]]
+  #[strikethrough[derive(Partial, Debug, PartialEq, Clone, Deserialize, Serialize)]]
   #[strikethrough[serde(rename_all = "camelCase", deny_unknown_fields)]]
   #[strikethrough[derive(SmartDefault)]]
   #[strikethrough[serde(default)]]
@@ -259,6 +259,18 @@ structstruck::strike! {
      }
    },
    pub global_game_config: GameConfig,
+    pub intelligence: struct IntelligenceConfig {
+      pub enabled: bool,
+      pub model: struct LLMModelConfig {
+        pub base_url: String,
+        pub api_key: String,
+        pub model: String,
+      },
+      // pub chat: struct LLMChatConfig {
+      //   #[default = 0.7]
+      //   pub temperature: f32,
+      // }
+    },
    pub local_game_directories: Vec<GameDirectory>,
 
    #[default(_code="vec![\"https://mc.sjtu.cn/api-sjmcl/article\".to_string(), \"https://mc.sjtu.cn/api-sjmcl/article/mua\".to_string()]")]
@@ -294,7 +306,7 @@ structstruck::strike! {
 
      #[default([true, true])]
      pub accordion_states: [bool; 2],
    },
-  }
+  },
 }
}
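Since every struck struct carries `serde(rename_all = "camelCase")`, the snake_case Rust fields `base_url` and `api_key` serialize as `baseUrl` and `apiKey`, which is exactly the shape the frontend `LauncherConfig` interface mirrors later in this diff. A minimal sketch of the resulting JSON section, with the default values taken from `src/models/config.ts` below:

```ts
// Serialized form of the new config section (defaults from defaultConfig below).
const intelligenceSection = {
  enabled: false,
  model: {
    baseUrl: "", // serde camelCase rename of base_url
    apiKey: "", // serde camelCase rename of api_key
    model: "gpt-3.5-turbo",
  },
};
```

Dropping `Eq` from the strikethrough derives (here and in `misc.rs` above) also keeps the door open for the commented-out `temperature: f32` field, since `f32` implements `PartialEq` but not `Eq`.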
diff --git a/src-tauri/src/lib.rs b/src-tauri/src/lib.rs
index 63e881131..edd592552 100644
--- a/src-tauri/src/lib.rs
+++ b/src-tauri/src/lib.rs
@@ -2,6 +2,7 @@ mod account;
 mod discover;
 mod error;
 mod instance;
+mod intelligence;
 mod launch;
 mod launcher_config;
 mod partial;
@@ -142,6 +143,8 @@ pub async fn run() {
      resource::commands::fetch_remote_resource_by_id,
      discover::commands::fetch_news_sources_info,
      discover::commands::fetch_news_post_summaries,
+      intelligence::commands::check_llm_service_availability,
+      intelligence::commands::fetch_llm_chat_response,
      tasks::commands::schedule_progressive_task_group,
      tasks::commands::cancel_progressive_task,
      tasks::commands::resume_progressive_task,
diff --git a/src/layouts/settings-layout.tsx b/src/layouts/settings-layout.tsx
index 24304b996..343ed6e03 100644
--- a/src/layouts/settings-layout.tsx
+++ b/src/layouts/settings-layout.tsx
@@ -13,6 +13,7 @@ import {
   LuPalette,
   LuRefreshCcw,
   LuSettings,
+  LuSparkles,
 } from "react-icons/lu";
 import NavMenu from "@/components/common/nav-menu";
 import { isDev } from "@/utils/env";
@@ -34,6 +35,7 @@ const SettingsLayout: React.FC = ({ children }) => {
     { key: "general", icon: LuSettings },
     { key: "appearance", icon: LuPalette },
     { key: "download", icon: LuCloudDownload },
+    { key: "intelligence", icon: LuSparkles },
     { key: "sync-restore", icon: LuRefreshCcw },
     { key: "help", icon: LuCircleHelp },
     { key: "about", icon: LuInfo },
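Beyond the new nav entry, pages gate AI features by reading the launcher config from context, the same pattern `game-error.tsx` uses further down (`config.intelligence.enabled`). A sketch of that gating as a standalone component follows; the `useLauncherConfig` hook exists in the codebase (it is called in `game-log.tsx`), but its import path here is an assumption:

```tsx
import React from "react";
import { useLauncherConfig } from "@/contexts/config"; // path assumed, not shown in this diff

// Renders children only while the intelligence master switch is on.
const AiFeatureGate: React.FC<React.PropsWithChildren> = ({ children }) => {
  const { config } = useLauncherConfig();
  return config.intelligence.enabled ? <>{children}</> : null;
};

export default AiFeatureGate;
```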
diff --git a/src/locales/en.json b/src/locales/en.json
index c8dac3482..f391931bd 100644
--- a/src/locales/en.json
+++ b/src/locales/en.json
@@ -829,14 +829,22 @@
        "MOD_INTERNET_ERROR": "The instance cannot continue due to mod download failure. Please check your network connection or use a proxy.",
        "VICS_MODERN_WARFARE_ERROR": "The instance cannot continue due to Vic's Modern Warfare mod error. Please try updating or removing the mod.",
        "FORGE_LITELOADER_CONFLICT": "The instance cannot continue due to Forge and LiteLoader conflict. Please try removing LiteLoader or changing Forge version.",
-        "UNKNOWN": "Cannot determine the cause of the crash. Please check the logs for more details or export the crash report."
+        "UNKNOWN": "Cannot determine the cause of the crash. You can try AI analysis, check the logs for more details, or export the crash report."
      },
      "button": {
+        "aiAnalysis": "AI Analysis",
        "exportGameInfo": "Export Crash Report",
        "gameLogs": "Game Logs",
        "help": "Help"
      },
-      "bottomAlert": "Export and send the crash report instead of a screenshot of this page for help."
+      "bottomAlert": "When asking others for help, export and send the crash report instead of a screenshot of this page.",
+      "aiAnalysis": {
+        "otherInfo": "Other Information",
+        "structureNotProcessed": "AI did not return a valid JSON structure; below is its original response:",
+        "systemPrompt": "You are a Minecraft launch/crash diagnostics expert. Output only the following JSON format, with no greetings, explanations, or extra text before or after it. \n\n {\n \"reasons\": [ // Main causes and solutions (each distinct)\n { \"reason\": \"One-sentence cause\", \"fix\": \"Concise solution; Markdown syntax is supported and it may include possible causes, troubleshooting steps, commands/paths, etc.\" }\n ]\n}\n\n Requirements:\n - Strictly output JSON only, nothing else.\n - \"reasons\" should have at most 5 items, each simple and actionable.\n - Focus on this log; do not give generic advice.\n - Do not output guesses or solutions concerning the SJMCL launcher itself.",
+        "title": "AI Crash Analysis",
+        "userPrompt": "The player's game has crashed. The player is using the {{os}} operating system, Java version {{javaVersion}}, Minecraft version {{mcVersion}}, and the SJMCL launcher.\n\nHere is the relevant part of the game crash log:\n\n{{log}}\n\nBased on the log content, please analyze the main cause of the game crash and return a solution in JSON format."
+      }
    },
    "GameLogPage": {
      "placeholder": "Filter by Text",
@@ -904,6 +912,7 @@
        "error": "Failed to share"
      }
    },
+    "unknown": "Unknown",
    "version": {
      "dev": "development version",
      "nightly": "nightly build",
@@ -931,6 +940,10 @@
        "part-2": "."
      }
    },
+    "enableAiProvider": {
+      "title": "Generative AI Features",
+      "description": "When enabled, AI can analyze crash causes on the game crash page."
+    },
    "instancesNavType": {
      "title": "Instance Page Navigation Mode",
      "description": "Change whether the instance page's left navigation bar is displayed and its grouping method.",
@@ -1292,6 +1305,31 @@
      "launch": "Play this Server"
    }
  },
+  "IntelligenceSettingsPage": {
+    "masterSwitch": {
+      "title": "Intelligent Services",
+      "description": "Enhance SJMCL with intelligent features powered by user-provided generative language models"
+    },
+    "model": {
+      "title": "Model",
+      "settings": {
+        "baseUrl": {
+          "title": "Base URL"
+        },
+        "apiKey": {
+          "title": "API Key"
+        },
+        "model": {
+          "title": "Model Name"
+        },
+        "checkAvailability": {
+          "title": "Model Service Availability",
+          "available": "Available",
+          "unavailable": "Unavailable"
+        }
+      }
+    }
+  },
  "JavaSettingsPage": {
    "javaList": {
      "title": "Java Management",
@@ -2374,6 +2412,32 @@
      }
    }
  },
+  "intelligence": {
+    "checkLLMServiceAvailability": {
+      "success": "GenAI service available",
+      "error": {
+        "title": "GenAI service unavailable",
+        "description": {
+          "NETWORK_ERROR": "Network error",
+          "API_PARSE_ERROR": "Failed to parse service API response",
+          "NO_SUCH_MODEL": "The specified model does not exist; please check that the name is correct",
+          "INVALID_API_KEY": "Invalid API key"
+        }
+      }
+    },
+    "fetchLLMChatResponse": {
+      "error": {
+        "title": "Failed to retrieve AI chat response",
+        "description": {
+          "NETWORK_ERROR": "Network error",
+          "NOT_ENABLED": "GenAI feature not enabled",
+          "API_PARSE_ERROR": "Failed to parse service API response",
+          "INVALID_API_KEY": "Invalid API key",
+          "NO_RESPONSE": "No response received"
+        }
+      }
+    }
+  },
  "task": {
    "scheduleProgressiveTaskGroup": {
      "error": "Failed to create task"
@@ -2404,6 +2468,7 @@
    "general": "General",
    "appearance": "Appearance",
    "download": "Download",
+    "intelligence": "Intelligence",
    "sync-restore": "Sync & Restore",
    "help": "Docs & Help",
    "about": "About",
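The `systemPrompt` pins the model to a strict JSON contract, and `structureNotProcessed` is the fallback copy shown when the reply violates it. Below is a hedged sketch of the parse step that contract implies; the type and helper names are hypothetical, and only the `reasons`/`reason`/`fix` schema comes from the prompt itself:

```ts
// Schema demanded by the systemPrompt above. AiCrashAnalysis and
// parseAiAnalysis are hypothetical names, not part of this diff.
interface AiCrashAnalysis {
  reasons: { reason: string; fix: string }[];
}

function parseAiAnalysis(raw: string): AiCrashAnalysis | null {
  try {
    const parsed = JSON.parse(raw);
    if (Array.isArray(parsed?.reasons)) return parsed as AiCrashAnalysis;
  } catch {
    // fall through: caller shows "structureNotProcessed" plus the raw reply
  }
  return null;
}
```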
"VICS_MODERN_WARFARE_ERROR": "当前实例由于 Vics Modern Warfare 模组错误,无法继续运行。请尝试更新或删除该模组。", "FORGE_LITELOADER_CONFLICT": "当前实例由于 Forge 与 LiteLoader 冲突,无法继续运行。请尝试删除 LiteLoader 或更换 Forge 版本。", - "UNKNOWN": "暂时无法分析该错误,请查看游戏日志或导出错误崩溃报告。" + "UNKNOWN": "暂时无法分析该错误,可尝试使用 AI 分析,或查看游戏日志、导出错误崩溃报告。" }, "button": { + "aiAnalysis": "使用 AI 分析", "exportGameInfo": "导出游戏崩溃报告", "gameLogs": "游戏日志", "help": "帮助" }, - "bottomAlert": "如需寻求他人帮助,请导出并发送崩溃报告,而非这个窗口的截图" + "bottomAlert": "如需寻求他人帮助,请导出并发送崩溃报告,而非这个窗口的截图", + "aiAnalysis": { + "otherInfo": "其他提示", + "structureNotProcessed": "AI 未能返回正确的 JSON 结构,下面是它的原始回复:", + "systemPrompt": "你是 Minecraft 启动/崩溃诊断专家。请只按以下 JSON 模式输出,不要任何开头/结尾客套话、不要解释。 \n\n {\n \"reasons\": [ // 主要原因与解决方案(每条不要重复)\n { \"reason\": \"一句话原因\", \"fix\": \"简单解决方案,支持 Markdown 语法,可包含若干可能原因、定位思路、命令/路径等\" }\n ]}\n\n 要求:\n - 严禁输出 JSON 以外的内容。\n - \"reasons\" 5 条以内,每条应当简单有效。\n - 聚焦本次日志,不要泛泛而谈。\n - 禁止输出与 SJMCL 启动器本身相关的问题猜测与解决方案。", + "title": "AI 崩溃分析", + "userPrompt": "玩家的游戏发生了崩溃。玩家使用 {{os}} 操作系统,Java 版本为 {{javaVersion}} ,Minecraft 版本为 {{mcVersion}},且使用了 SJMCL 启动器。\n\n这是游戏崩溃日志的相关部分:\n\n{{log}}\n\n请根据日志内容,分析导致游戏崩溃的主要原因,并返回 JSON 格式的解决方案。" + } }, "GameLogPage": { "placeholder": "输入关键词筛选", @@ -904,6 +912,7 @@ } }, "skip": "跳过", + "unknown": "未知", "version": { "dev": "开发版本", "nightly": "夜间构建版本", @@ -931,6 +940,10 @@ "part-2": " 继续使用 “聚合搜索”。" } }, + "enableAiProvider": { + "title": "生成式 AI 功能", + "description": "启用后,可在游戏崩溃页面使用 AI 分析崩溃原因" + }, "instancesNavType": { "title": "实例页导航栏模式", "description": "更改实例页左侧导航栏是否显示、分组方式", @@ -1292,6 +1305,31 @@ "launch": "游玩此服务器" } }, + "IntelligenceSettingsPage": { + "masterSwitch": { + "title": "智能服务", + "description": "结合您提供的生成式语言模型,为 SJMCL 引入更多智能体验" + }, + "model": { + "title": "模型", + "settings": { + "baseUrl": { + "title": "基础 URL" + }, + "apiKey": { + "title": "API 密钥" + }, + "model": { + "title": "模型名称" + }, + "checkAvailability": { + "title": "模型服务可用性", + "available": "可用", + "unavailable": "不可用" + } + } + } + }, "JavaSettingsPage": { "javaList": { "title": "Java 管理", @@ -2374,6 +2412,32 @@ } } }, + "intelligence": { + "checkLLMServiceAvailability": { + "success": "生成式 AI 服务可用", + "error": { + "title": "生成式 AI 服务不可用", + "description": { + "NETWORK_ERROR": "网络错误", + "API_PARSE_ERROR": "无法正常读取服务 API", + "NO_SUCH_MODEL": "指定模型不存在,请检查名称是否正确", + "INVALID_API_KEY": "API 密钥无效" + } + } + }, + "fetchLLMChatResponse": { + "error": { + "title": "获取 AI 聊天响应失败", + "description": { + "NETWORK_ERROR": "网络错误", + "NOT_ENABLED": "生成式 AI 功能未启用", + "API_PARSE_ERROR": "无法正常读取服务 API", + "INVALID_API_KEY": "API 密钥无效", + "NO_RESPONSE": "没有返回任何内容" + } + } + } + }, "task": { "scheduleProgressiveTaskGroup": { "error": "任务创建失败" @@ -2404,6 +2468,7 @@ "general": "通用", "appearance": "外观", "download": "下载资源", + "intelligence": "智能", "sync-restore": "同步与还原", "help": "文档与帮助", "about": "关于", diff --git a/src/models/config.ts b/src/models/config.ts index e9f7f82af..930901533 100644 --- a/src/models/config.ts +++ b/src/models/config.ts @@ -124,8 +124,16 @@ export interface LauncherConfig { autoPurgeLauncherLogs: boolean; }; }; - localGameDirectories: GameDirectory[]; globalGameConfig: GameConfig; + intelligence: { + enabled: boolean; + model: { + baseUrl: string; + apiKey: string; + model: string; + }; + }; + localGameDirectories: GameDirectory[]; discoverSourceEndpoints: string[]; extraJavaPaths: string[]; suppressedDialogs: string[]; @@ -276,8 +284,16 @@ export const defaultConfig: LauncherConfig = { autoPurgeLauncherLogs: true, }, }, - localGameDirectories: [{ name: "Current", dir: ".minecraft/" }], 
diff --git a/src/models/intelligence.ts b/src/models/intelligence.ts
new file mode 100644
index 000000000..23eb6a0cb
--- /dev/null
+++ b/src/models/intelligence.ts
@@ -0,0 +1,4 @@
+export interface ChatMessage {
+  role: "system" | "user" | "assistant";
+  content: string;
+}
diff --git a/src/pages/settings/dev-test.tsx b/src/pages/settings/dev-test.tsx
index acaa0eb32..6841cfe3e 100644
--- a/src/pages/settings/dev-test.tsx
+++ b/src/pages/settings/dev-test.tsx
@@ -1,8 +1,9 @@
 import { Alert, AlertIcon, Button, VStack } from "@chakra-ui/react";
 import { invoke } from "@tauri-apps/api/core";
 import { useRouter } from "next/router";
-import { useEffect, useState } from "react";
+import { useEffect } from "react";
 import SkinPreview from "@/components/skin-preview";
+import { useSharedModals } from "@/contexts/shared-modal";
 import { DownloadTaskParam, TaskParam, TaskTypeEnums } from "@/models/task";
 import { TaskService } from "@/services/task";
 import { isProd } from "@/utils/env";
@@ -21,7 +22,7 @@ const DevTestPage = () => {
    }
  }, [router]);
 
-  const [task_id, setTaskId] = useState(null);
+  const { openSharedModal } = useSharedModals();
 
  return (
@@ -58,6 +59,7 @@ const DevTestPage = () => {
        >
          Launch Game
        </Button>
+        <Button onClick={() => openSharedModal("...")}>...</Button>
diff --git a/src/pages/standalone/game-error.tsx b/src/pages/standalone/game-error.tsx
--- a/src/pages/standalone/game-error.tsx
+++ b/src/pages/standalone/game-error.tsx
@@ ... @@
+        {config.intelligence.enabled && (
+          <Button>
+            {t("GameErrorPage.button.aiAnalysis")}
+          </Button>
+        )}
        {t("GameErrorPage.bottomAlert")}
diff --git a/src/pages/standalone/game-log.tsx b/src/pages/standalone/game-log.tsx
index 9a9d7fe1b..000c9242b 100644
--- a/src/pages/standalone/game-log.tsx
+++ b/src/pages/standalone/game-log.tsx
@@ -18,6 +18,32 @@ import { LaunchService } from "@/services/launch";
 import styles from "@/styles/game-log.module.css";
 import { parseIdFromWindowLabel } from "@/utils/window";
 
+// Module-level so that continuation lines (e.g. stack traces) inherit the
+// level of the preceding log line across calls.
+let lastLevel: string = "INFO";
+
+export const getLogLevel = (log: string): string => {
+  const match = log.match(
+    /\[\d{2}:\d{2}:\d{2}]\s+\[.*?\/(INFO|WARN|ERROR|DEBUG|FATAL)]/i
+  );
+  if (match) {
+    lastLevel = match[1].toUpperCase();
+    return lastLevel;
+  }
+
+  if (/^\s+at /.test(log) || /^\s+Caused by:/.test(log) || /^\s+/.test(log)) {
+    return lastLevel;
+  }
+
+  if (/exception|error|invalid|failed|错误/i.test(log)) {
+    lastLevel = "ERROR";
+    return "ERROR";
+  }
+
+  // Unrecognized line: leave lastLevel unchanged for following continuations.
+  return "INFO";
+};
+
 const GameLogPage: React.FC = () => {
  const { t } = useTranslation();
  const { config } = useLauncherConfig();
@@ -59,30 +85,6 @@ const GameLogPage: React.FC = () => {
    return () => unlisten();
  }, []);
 
-  let lastLevel: string = "INFO";
-
-  const getLogLevel = (log: string): string => {
-    const match = log.match(
-      /\[\d{2}:\d{2}:\d{2}]\s+\[.*?\/(INFO|WARN|ERROR|DEBUG|FATAL)]/i
-    );
-    if (match) {
-      lastLevel = match[1].toUpperCase();
-      return lastLevel;
-    }
-
-    if (/^\s+at /.test(log) || /^\s+Caused by:/.test(log) || /^\s+/.test(log)) {
-      return lastLevel;
-    }
-
-    if (/exception|error|invalid|failed|错误/i.test(log)) {
-      lastLevel = "ERROR";
-      return "ERROR";
-    }
-
-    lastLevel = lastLevel || "INFO";
-    return "INFO";
-  };
-
  const filteredLogs = logs.filter((log) => {
    const level = getLogLevel(log);
    return (
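Note that `lastLevel` must live at module scope (as above) rather than inside `getLogLevel`: the continuation-line branch relies on state surviving across calls, so that an indented stack-trace line inherits the level of the last headed line. For illustration:

```ts
import { getLogLevel } from "@/pages/standalone/game-log";

getLogLevel("[12:34:56] [Server thread/ERROR] [minecraft/Main]: boom"); // "ERROR"
getLogLevel("\tat net.minecraft.client.main.Main.run(Main.java:42)"); // "ERROR" (inherited)
getLogLevel("[12:34:57] [Render thread/INFO] [minecraft/Main]: done"); // "INFO"
```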
diff --git a/src/services/intelligence.ts b/src/services/intelligence.ts
new file mode 100644
index 000000000..690b7333b
--- /dev/null
+++ b/src/services/intelligence.ts
@@ -0,0 +1,37 @@
+import { invoke } from "@tauri-apps/api/core";
+import { ChatMessage } from "@/models/intelligence";
+import { InvokeResponse } from "@/models/response";
+import { responseHandler } from "@/utils/response";
+
+/**
+ * Service class for managing intelligence services (e.g. LLM) and interactions.
+ */
+export class IntelligenceService {
+  /**
+   * CHECK the availability of the LLM service.
+   * @param {string} baseUrl The base URL of the LLM service.
+   * @param {string} apiKey The API key for authentication.
+   * @param {string} model The LLM model to be used.
+   * @return {Promise<InvokeResponse<void>>}
+   */
+  @responseHandler("intelligence")
+  public static async checkLLMServiceAvailability(
+    baseUrl: string,
+    apiKey: string,
+    model: string
+  ): Promise<InvokeResponse<void>> {
+    return invoke("check_llm_service_availability", { baseUrl, apiKey, model });
+  }
+
+  /**
+   * RETRIEVE the LLM chat response for a given message list.
+   * @param {ChatMessage[]} messages The list of chat messages.
+   * @return {Promise<InvokeResponse<string>>}
+   */
+  @responseHandler("intelligence")
+  public static async fetchLLMChatResponse(
+    messages: ChatMessage[]
+  ): Promise<InvokeResponse<string>> {
+    return invoke("fetch_llm_chat_response", { messages });
+  }
+}
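Putting the pieces together, the crash-analysis flow composes the localized `GameErrorPage.aiAnalysis` prompts into `ChatMessage`s and hands them to this service. A sketch of the intended call pattern; the wrapper function is hypothetical, and the exact fields of `InvokeResponse` are defined in `@/models/response`, outside this diff:

```ts
import { ChatMessage } from "@/models/intelligence";
import { IntelligenceService } from "@/services/intelligence";

// Hypothetical wrapper: systemPrompt / userPrompt are the localized
// GameErrorPage.aiAnalysis strings, with {{os}}, {{javaVersion}},
// {{mcVersion}} and {{log}} already interpolated by i18n.
async function requestCrashAnalysis(systemPrompt: string, userPrompt: string) {
  const messages: ChatMessage[] = [
    { role: "system", content: systemPrompt },
    { role: "user", content: userPrompt },
  ];
  // Resolves to an InvokeResponse wrapping the model's raw string reply,
  // which the caller can then run through a JSON-schema parse step.
  return IntelligenceService.fetchLLMChatResponse(messages);
}
```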