1 change: 1 addition & 0 deletions src-tauri/Cargo.lock

Some generated files are not rendered by default.

1 change: 1 addition & 0 deletions src-tauri/Cargo.toml
@@ -91,6 +91,7 @@ axum = "0.8.6"
tower-http = { version = "0.6.6", features = ["cors"] }
sha2 = "0.10.9"
async-trait = "0.1.89"
futures-util = "0.3.31"

[target."cfg(windows)".dependencies]
winreg = "0.55.0"
2 changes: 1 addition & 1 deletion src-tauri/src/instance/models/misc.rs
@@ -78,7 +78,7 @@ pub enum ModLoaderStatus {
}

structstruck::strike! {
  #[strikethrough[derive(Debug, PartialEq, Eq, Clone, Deserialize, Serialize, Default)]]
  #[strikethrough[derive(Debug, PartialEq, Clone, Deserialize, Serialize, Default)]]
  #[strikethrough[serde(rename_all = "camelCase", deny_unknown_fields, default)]]
  pub struct Instance {
    pub id: String,
100 changes: 100 additions & 0 deletions src-tauri/src/intelligence/commands.rs
@@ -0,0 +1,100 @@
use std::sync::Mutex;

use tauri::{AppHandle, Manager};
use tauri_plugin_http::reqwest;

use crate::error::SJMCLResult;
use crate::intelligence::models::{
  ChatCompletionRequest, ChatCompletionResponse, ChatMessage, ChatModelsResponse, LLMServiceError,
};
use crate::launcher_config::models::LauncherConfig;

// TODO: factor chat completion out into a helper function
// TODO: migrate log analysis logic to backend (with multi-language prompt and result parsing)

#[tauri::command]
pub async fn check_llm_service_availability(
  app: AppHandle,
  base_url: String,
  api_key: String,
  model: String,
) -> SJMCLResult<()> {
  let client = app.state::<reqwest::Client>();
  let response = client
    .get(format!("{}/v1/models", base_url))
    .bearer_auth(api_key)
    .send()
    .await
    .map_err(|e| {
      log::error!("Error connecting to LLM service: {}", e);
      LLMServiceError::NetworkError
    })?;

  if response.status().is_success() {
    let models_response = response.json::<ChatModelsResponse>().await.map_err(|e| {
      log::error!("Error parsing LLM service response: {}", e);
      LLMServiceError::ApiParseError
    })?;
    if models_response.data.iter().any(|m| m.id == model) {
      Ok(())
    } else {
      Err(LLMServiceError::NoSuchModel.into())
    }
  } else {
    Err(LLMServiceError::InvalidAPIKey.into())
  }
}

#[tauri::command]
pub async fn fetch_llm_chat_response(
  app: AppHandle,
  messages: Vec<ChatMessage>,
) -> SJMCLResult<String> {
  let client = reqwest::Client::new(); // use a separate client instance w/o timeout.

  let (enabled, model_config) = {
    let config_binding = app.state::<Mutex<LauncherConfig>>();
    let config_state = config_binding.lock()?;
    (
      config_state.intelligence.enabled,
      config_state.intelligence.model.clone(),
    )
  };

  if !enabled {
    return Err(LLMServiceError::NotEnabled.into());
  }

  let response = client
    .post(format!("{}/v1/chat/completions", model_config.base_url))
    .bearer_auth(&model_config.api_key)
    .json(&ChatCompletionRequest {
      model: model_config.model.clone(),
      messages,
      stream: false,
    })
    .send()
    .await
    .map_err(|e| {
      log::error!("Error connecting to AI service: {}", e);
      LLMServiceError::NetworkError
    })?;

  if response.status().is_success() {
    let completion_response = response
      .json::<ChatCompletionResponse>()
      .await
      .map_err(|e| {
        log::error!("Error parsing AI service response: {}", e);
        LLMServiceError::ApiParseError
      })?;
    if let Some(choice) = completion_response.choices.first() {
      Ok(choice.message.content.clone())
    } else {
      Err(LLMServiceError::NoResponse.into())
    }
  } else {
    log::error!("AI service returned error status: {}", response.status());
    Err(LLMServiceError::NetworkError.into())
  }
}
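
The first TODO above asks for the completion call to be factored into a helper. A minimal sketch of one possible shape, reusing the models and error conversions from this diff (the helper name and signature are illustrative, not part of the PR):

```rust
// Hypothetical helper for the chat-completion TODO; names are illustrative.
async fn chat_completion(
  client: &reqwest::Client,
  model_config: &LLMModelConfig,
  messages: Vec<ChatMessage>,
) -> SJMCLResult<String> {
  let response = client
    .post(format!("{}/v1/chat/completions", model_config.base_url))
    .bearer_auth(&model_config.api_key)
    .json(&ChatCompletionRequest {
      model: model_config.model.clone(),
      messages,
      stream: false,
    })
    .send()
    .await
    .map_err(|_| LLMServiceError::NetworkError)?;

  // Parse the OpenAI-compatible response and return the first choice.
  let completion = response
    .json::<ChatCompletionResponse>()
    .await
    .map_err(|_| LLMServiceError::ApiParseError)?;
  completion
    .choices
    .first()
    .map(|c| c.message.content.clone())
    .ok_or_else(|| LLMServiceError::NoResponse.into())
}
```

With such a helper, fetch_llm_chat_response would reduce to the config lookup, the enabled check, and a single call.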
2 changes: 2 additions & 0 deletions src-tauri/src/intelligence/mod.rs
@@ -0,0 +1,2 @@
pub mod commands;
pub mod models;
53 changes: 53 additions & 0 deletions src-tauri/src/intelligence/models.rs
@@ -0,0 +1,53 @@
use serde::{Deserialize, Serialize};
use strum_macros::Display;

structstruck::strike! {
  #[strikethrough[derive(Serialize, Deserialize)]]
  pub struct ChatModelsResponse {
    pub data: Vec<pub struct ChatModel {
      pub id: String,
      pub object: String,
      pub created: u64,
      pub owned_by: String,
    }>,
  }
}

structstruck::strike! {
  #[strikethrough[derive(Serialize, Deserialize)]]
  pub struct ChatCompletionRequest {
    pub model: String,
    pub messages: Vec<pub struct ChatMessage {
      pub role: String,
      pub content: String,
    }>,
    pub stream: bool,
  }
}

structstruck::strike! {
  #[strikethrough[derive(Serialize, Deserialize)]]
  pub struct ChatCompletionResponse {
    pub id: String,
    pub object: String,
    pub created: u64,
    pub choices: Vec<pub struct ChatCompletionChoice {
      pub index: u32,
      pub message: ChatMessage,
      pub finish_reason: String,
    }>,
  }
}

#[derive(Debug, Display)]
#[strum(serialize_all = "SCREAMING_SNAKE_CASE")]
pub enum LLMServiceError {
  ApiParseError,
  InvalidAPIKey,
  NetworkError,
  NotEnabled,
  NoSuchModel,
  NoResponse,
}

impl std::error::Error for LLMServiceError {}
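
For readers unfamiliar with structstruck: the strike! macro lifts each inline struct definition out into a standalone item, applying every #[strikethrough[...]] attribute to each generated struct. The first block above expands to roughly:

```rust
// Approximate expansion of the ChatModelsResponse strike! block above (sketch).
#[derive(Serialize, Deserialize)]
pub struct ChatModelsResponse {
  pub data: Vec<ChatModel>,
}

#[derive(Serialize, Deserialize)]
pub struct ChatModel {
  pub id: String,
  pub object: String,
  pub created: u64,
  pub owned_by: String,
}
```

Note also that the strum Display derive with SCREAMING_SNAKE_CASE is what turns variants such as ApiParseError and NoSuchModel into the API_PARSE_ERROR and NO_SUCH_MODEL strings matched by the locale entries in en.json below.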
18 changes: 15 additions & 3 deletions src-tauri/src/launcher_config/models.rs
@@ -79,7 +79,7 @@ pub enum LauncherVisiablity {
// assert!(config.access("114514").is_err())
//
structstruck::strike! {
  #[strikethrough[derive(Partial, Debug, PartialEq, Eq, Clone, Deserialize, Serialize)]]
  #[strikethrough[derive(Partial, Debug, PartialEq, Clone, Deserialize, Serialize)]]
  #[strikethrough[serde(rename_all = "camelCase", deny_unknown_fields)]]
  #[strikethrough[derive(SmartDefault)]]
  #[strikethrough[serde(default)]]
@@ -163,7 +163,7 @@ pub enum ProxyType {
}

structstruck::strike! {
  #[strikethrough[derive(Partial, Debug, PartialEq, Eq, Clone, Deserialize, Serialize)]]
  #[strikethrough[derive(Partial, Debug, PartialEq, Clone, Deserialize, Serialize)]]
  #[strikethrough[serde(rename_all = "camelCase", deny_unknown_fields)]]
  #[strikethrough[derive(SmartDefault)]]
  #[strikethrough[serde(default)]]
@@ -259,6 +259,18 @@ structstruck::strike! {
}
},
pub global_game_config: GameConfig,
pub intelligence: struct IntelligenceConfig {
  pub enabled: bool,
  pub model: struct LLMModelConfig {
    pub base_url: String,
    pub api_key: String,
    pub model: String,
  },
  // pub chat: struct LLMChatConfig {
  //   #[default = 0.7]
  //   pub temperature: f32,
  // }
},
pub local_game_directories: Vec<GameDirectory>,
#[default(_code="vec![\"https://mc.sjtu.cn/api-sjmcl/article\".to_string(),
\"https://mc.sjtu.cn/api-sjmcl/article/mua\".to_string()]")]
@@ -294,7 +306,7 @@ structstruck::strike! {
#[default([true, true])]
pub accordion_states: [bool; 2],
},
}
},
}
}
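
Because of the camelCase rename, the new section appears camelCased in the serialized launcher config. A standalone sketch of the resulting shape (mirrored structs trimmed to this section; defaults assumed to be the plain type defaults, since the diff sets no #[default] overrides here):

```rust
use serde::{Deserialize, Serialize};

// Trimmed mirrors of the IntelligenceConfig / LLMModelConfig structs above.
#[derive(Default, Serialize, Deserialize)]
#[serde(rename_all = "camelCase", default)]
struct IntelligenceConfig {
  enabled: bool,
  model: LLMModelConfig,
}

#[derive(Default, Serialize, Deserialize)]
#[serde(rename_all = "camelCase", default)]
struct LLMModelConfig {
  base_url: String,
  api_key: String,
  model: String,
}

fn main() {
  let cfg = IntelligenceConfig::default();
  // Prints: {"enabled":false,"model":{"baseUrl":"","apiKey":"","model":""}}
  println!("{}", serde_json::to_string(&cfg).unwrap());
}
```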

3 changes: 3 additions & 0 deletions src-tauri/src/lib.rs
@@ -2,6 +2,7 @@ mod account;
mod discover;
mod error;
mod instance;
mod intelligence;
mod launch;
mod launcher_config;
mod partial;
@@ -142,6 +143,8 @@ pub async fn run() {
resource::commands::fetch_remote_resource_by_id,
discover::commands::fetch_news_sources_info,
discover::commands::fetch_news_post_summaries,
intelligence::commands::check_llm_service_availability,
intelligence::commands::fetch_llm_chat_response,
tasks::commands::schedule_progressive_task_group,
tasks::commands::cancel_progressive_task,
tasks::commands::resume_progressive_task,
2 changes: 2 additions & 0 deletions src/layouts/settings-layout.tsx
@@ -13,6 +13,7 @@ import {
LuPalette,
LuRefreshCcw,
LuSettings,
LuSparkles,
} from "react-icons/lu";
import NavMenu from "@/components/common/nav-menu";
import { isDev } from "@/utils/env";
@@ -34,6 +35,7 @@ const SettingsLayout: React.FC<SettingsLayoutProps> = ({ children }) => {
{ key: "general", icon: LuSettings },
{ key: "appearance", icon: LuPalette },
{ key: "download", icon: LuCloudDownload },
{ key: "intelligence", icon: LuSparkles },
{ key: "sync-restore", icon: LuRefreshCcw },
{ key: "help", icon: LuCircleHelp },
{ key: "about", icon: LuInfo },
69 changes: 67 additions & 2 deletions src/locales/en.json
@@ -829,14 +829,22 @@
"MOD_INTERNET_ERROR": "The instance cannot continue due to mod download failure. Please check your network connection or use a proxy.",
"VICS_MODERN_WARFARE_ERROR": "The instance cannot continue due to Vic's Modern Warfare mod error. Please try updating or removing the mod.",
"FORGE_LITELOADER_CONFLICT": "The instance cannot continue due to Forge and LiteLoader conflict. Please try removing LiteLoader or changing Forge version.",
"UNKNOWN": "Cannot determine the cause of the crash. Please check the logs for more details or export the crash report."
"UNKNOWN": "Cannot determine the cause of the crash. You can use AI analysis, or check the logs for more details and export the crash report."
},
"button": {
"aiAnalysis": "AI Analysis",
"exportGameInfo": "Export Crash Report",
"gameLogs": "Game Logs",
"help": "Help"
},
"bottomAlert": "Export and send the crash report instead of a screenshot of this page for help."
"bottomAlert": "Export and send the crash report instead of a screenshot of this page for help.",
"aiAnalysis": {
"otherInfo": "Other Information",
"structureNotProcessed": "AI did not return a valid JSON structure, below is its original response:",
"systemPrompt": "You are a Minecraft launch/crash diagnostics expert. Only output in the following JSON format, with no greetings, explanations, or extra text before/after. \n\n {\n \"reasons\": [ // Main causes and solutions (keep different)\n { \"reason\": \"One-sentence cause\", \"fix\": \"Simple solution, support Markdown syntax and may include possible causes, troubleshooting steps, commands/paths, etc.\" }\n ]\n}\n\n Requirements:\n - Strictly output only JSON, nothing else.\n - \"reasons\" should have less than 5 items, each cause should be simple and effective.\n - Focus on this log, do not give generic advice.\n - Prevent outputting guesses and solutions to problems related to the SJMCL launcher itself.",
"title": "AI Crash Analysis",
"userPrompt": "The player's game has crashed. The player is using {{os}} operating system, Java version {{javaVersion}}, Minecraft version {{mcVersion}} and SJMCL launcher.\n\nHere is the relevant part of the game crash log:\n\n{{log}}\n\nBased on the log content, please analyze the main cause of the game crash and return a solution in JSON format."
}
},
"GameLogPage": {
"placeholder": "Filter by Text",
@@ -904,6 +912,7 @@
"error": "Failed to share"
}
},
"unknown": "Unknown",
"version": {
"dev": "development version",
"nightly": "nightly build",
@@ -931,6 +940,10 @@
"part-2": "."
}
},
"enableAiProvider": {
"title": "Generative AI Features",
"description": "When enabled, AI can analyze crash causes on the game crash page."
},
"instancesNavType": {
"title": "Instance Page Navigation Mode",
"description": "Change whether the instance page's left navigation bar is displayed and its grouping method.",
@@ -1292,6 +1305,31 @@
"launch": "Play this Server"
}
},
"IntelligenceSettingsPage": {
"masterSwitch": {
"title": "Intelligent Services",
"description": "Enhance SJMCL with intelligent features powered by user-provided generative language models"
},
"model": {
"title": "Model",
"settings": {
"baseUrl": {
"title": "Base URL"
},
"apiKey": {
"title": "API Key"
},
"model": {
"title": "Model Name"
},
"checkAvailability": {
"title": "Model Service Availability",
"available": "Available",
"unavailable": "Unavailable"
}
}
}
},
"JavaSettingsPage": {
"javaList": {
"title": "Java Management",
@@ -2374,6 +2412,32 @@
}
}
},
"intelligence": {
"checkLLMServiceAvailability": {
"success": "GenAI service available",
"error": {
"title": "GenAI service unavailable",
"description": {
"NETWORK_ERROR": "Network error",
"API_PARSE_ERROR": "Failed to parse service API",
"NO_SUCH_MODEL": "The specified model does not exist, please check if the name is correct",
"INVALID_API_KEY": "Invalid API key"
}
}
},
"fetchLLMChatResponse": {
"error": {
"title": "Failed to retrieve AI chat response",
"description": {
"NETWORK_ERROR": "Network error",
"NOT_ENABLED": "GenAI feature not enabled",
"API_PARSE_ERROR": "Failed to parse service API",
"INVALID_API_KEY": "Invalid API key",
"NO_RESPONSE": "No response received"
}
}
}
},
"task": {
"scheduleProgressiveTaskGroup": {
"error": "Failed to create task"
@@ -2404,6 +2468,7 @@
"general": "General",
"appearance": "Appearance",
"download": "Download",
"intelligence": "Intelligence",
"sync-restore": "Sync & Restore",
"help": "Docs & Help",
"about": "About",