From b4ccdf2912abc2beada8b997e714a7411d07d888 Mon Sep 17 00:00:00 2001 From: K1skakas Date: Sat, 21 Mar 2026 20:30:38 +0100 Subject: [PATCH 01/18] feat: full English UI translation Translated all Vue frontend components and views from Chinese to English. This is the first complete EN localization of the MiroFish UI. - All 15 Vue files translated (views + components) - docker-compose.yml updated to build locally from source --- docker-compose.yml | 4 +- frontend/src/components/GraphPanel.vue | 14 +- frontend/src/components/HistoryDatabase.vue | 48 ++--- frontend/src/components/Step1GraphBuild.vue | 42 ++--- frontend/src/components/Step2EnvSetup.vue | 188 +++++++++---------- frontend/src/components/Step3Simulation.vue | 2 +- frontend/src/components/Step4Report.vue | 4 +- frontend/src/components/Step5Interaction.vue | 26 +-- frontend/src/views/Home.vue | 72 +++---- frontend/src/views/InteractionView.vue | 4 +- frontend/src/views/MainView.vue | 4 +- frontend/src/views/Process.vue | 86 ++++----- frontend/src/views/ReportView.vue | 4 +- frontend/src/views/SimulationRunView.vue | 4 +- frontend/src/views/SimulationView.vue | 4 +- 15 files changed, 252 insertions(+), 254 deletions(-) diff --git a/docker-compose.yml b/docker-compose.yml index 637f1dfae..d5b984602 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -1,8 +1,6 @@ services: mirofish: - image: ghcr.io/666ghj/mirofish:latest - # 加速镜像(如拉取缓慢可替换上方地址) - # image: ghcr.nju.edu.cn/666ghj/mirofish:latest + build: . container_name: mirofish env_file: - .env diff --git a/frontend/src/components/GraphPanel.vue b/frontend/src/components/GraphPanel.vue index 314c966e4..7d7bb2428 100644 --- a/frontend/src/components/GraphPanel.vue +++ b/frontend/src/components/GraphPanel.vue @@ -4,11 +4,11 @@ Graph Relationship Visualization
- -
@@ -27,7 +27,7 @@ - {{ isSimulating ? 'GraphRAG长短期记忆实时更新中' : '实时更新中...' }} + {{ isSimulating ? 'GraphRAG memory updating in real-time' : 'Updating in real-time...' }} @@ -39,8 +39,8 @@ - 还有少量内容处理中,建议稍后手动刷新图谱 - @@ -337,7 +337,7 @@ const truncateText = (text, maxLength) => { // 从模拟需求生成标题(取前20字) const getSimulationTitle = (requirement) => { - if (!requirement) return '未命名模拟' + if (!requirement) return 'Unnamed Simulation' const title = requirement.slice(0, 20) return requirement.length > 20 ? title + '...' : title } @@ -353,8 +353,8 @@ const formatSimulationId = (simulationId) => { const formatRounds = (simulation) => { const current = simulation.current_round || 0 const total = simulation.total_rounds || 0 - if (total === 0) return '未开始' - return `${current}/${total} 轮` + if (total === 0) return 'Not started' + return `${current}/${total} rounds` } // 获取文件类型(用于样式) @@ -382,7 +382,7 @@ const getFileTypeLabel = (filename) => { // 截断文件名(保留扩展名) const truncateFilename = (filename, maxLength) => { - if (!filename) return '未知文件' + if (!filename) return 'Unknown file' if (filename.length <= maxLength) return filename const ext = filename.includes('.') ? '.' + filename.split('.').pop() : '' diff --git a/frontend/src/components/Step1GraphBuild.vue b/frontend/src/components/Step1GraphBuild.vue index de33a3fd1..975346d41 100644 --- a/frontend/src/components/Step1GraphBuild.vue +++ b/frontend/src/components/Step1GraphBuild.vue @@ -6,25 +6,25 @@
01 - 本体生成 + Ontology Generation
- 已完成 - 生成中 - 等待 + Completed + Processing + Pending

POST /api/graph/ontology/generate

- LLM分析文档内容与模拟需求,提取出现实种子,自动生成合适的本体结构 + LLM analyzes document content and simulation requirements, extracts reality seeds, and automatically generates the appropriate ontology structure

- {{ ontologyProgress.message || '正在分析文档...' }} + {{ ontologyProgress.message || 'Analyzing documents...' }}
@@ -110,34 +110,34 @@
02 - GraphRAG构建 + GraphRAG Build
- 已完成 + Completed {{ buildProgress?.progress || 0 }}% - 等待 + Pending

POST /api/graph/build

- 基于生成的本体,将文档自动分块后调用 Zep 构建知识图谱,提取实体和关系,并形成时序记忆与社区摘要 + Based on the generated ontology, documents are automatically chunked and Zep is called to build a knowledge graph, extracting entities and relationships, and forming temporal memory and community summaries

{{ graphStats.nodes }} - 实体节点 + Entity Nodes
{{ graphStats.edges }} - 关系边 + Relation Edges
{{ graphStats.types }} - SCHEMA类型 + Schema Types
@@ -148,23 +148,23 @@
03 - 构建完成 + Build Complete
- 进行中 + In Progress

POST /api/simulation/create

-

图谱构建已完成,请进入下一步进行模拟环境搭建

-
@@ -233,11 +233,11 @@ const handleEnterEnvSetup = async () => { }) } else { console.error('创建模拟失败:', res.error) - alert('创建模拟失败: ' + (res.error || '未知错误')) + alert('Failed to create simulation: ' + (res.error || 'Unknown error')) } } catch (err) { console.error('创建模拟异常:', err) - alert('创建模拟异常: ' + err.message) + alert('Error creating simulation: ' + err.message) } finally { creatingSimulation.value = false } diff --git a/frontend/src/components/Step2EnvSetup.vue b/frontend/src/components/Step2EnvSetup.vue index eae776aaf..adcd0f65e 100644 --- a/frontend/src/components/Step2EnvSetup.vue +++ b/frontend/src/components/Step2EnvSetup.vue @@ -6,18 +6,18 @@
01 - 模拟实例初始化 + Simulation Instance Init
- 已完成 - 初始化 + Completed + Initializing

POST /api/simulation/create

- 新建simulation实例,拉取模拟世界参数模版 + Create a new simulation instance and fetch world parameter templates

@@ -35,7 +35,7 @@
Task ID - {{ taskId || '异步任务已完成' }} + {{ taskId || 'Async task completed' }}
@@ -46,41 +46,41 @@
02 - 生成 Agent 人设 + Generate Agent Personas
- 已完成 + Completed {{ prepareProgress }}% - 等待 + Pending

POST /api/simulation/prepare

- 结合上下文,自动调用工具从知识图谱梳理实体与关系,初始化模拟个体,并基于现实种子赋予他们独特的行为与记忆 + Automatically calls tools to map entities and relationships from the knowledge graph, initializes simulation individuals, and assigns unique behaviors and memories based on reality seeds

{{ profiles.length }} - 当前Agent数 + Current Agents
{{ expectedTotal || '-' }} - 预期Agent总数 + Expected Total
{{ totalTopicsCount }} - 现实种子当前关联话题数 + Current Topics
- 已生成的 Agent 人设 + Generated Agent Personas
@{{ profile.name || `agent_${idx}` }}
- {{ profile.profession || '未知职业' }} + {{ profile.profession || 'Unknown' }}
-

{{ profile.bio || '暂无简介' }}

+

{{ profile.bio || 'No bio' }}

03 - 生成双平台模拟配置 + Generate Dual-Platform Config
- 已完成 - 生成中 - 等待 + Completed + Processing + Pending

POST /api/simulation/prepare

- LLM 根据模拟需求与现实种子,智能设置世界时间流速、推荐算法、每个个体的活跃时间段、发言频率、事件触发等参数 + LLM intelligently configures world time flow, recommendation algorithms, active periods, posting frequency, and event triggers based on simulation requirements and reality seeds

@@ -139,40 +139,40 @@
- 模拟时长 - {{ simulationConfig.time_config?.total_simulation_hours || '-' }} 小时 + Duration + {{ simulationConfig.time_config?.total_simulation_hours || '-' }} hours
- 每轮时长 - {{ simulationConfig.time_config?.minutes_per_round || '-' }} 分钟 + Round Duration + {{ simulationConfig.time_config?.minutes_per_round || '-' }} min
- 总轮次 - {{ Math.floor((simulationConfig.time_config?.total_simulation_hours * 60 / simulationConfig.time_config?.minutes_per_round)) || '-' }} 轮 + Total Rounds + {{ Math.floor((simulationConfig.time_config?.total_simulation_hours * 60 / simulationConfig.time_config?.minutes_per_round)) || '-' }}
- 每小时活跃 + Agents/Hour {{ simulationConfig.time_config?.agents_per_hour_min }}-{{ simulationConfig.time_config?.agents_per_hour_max }}
- 高峰时段 + Peak Hours {{ simulationConfig.time_config?.peak_hours?.join(':00, ') }}:00 ×{{ simulationConfig.time_config?.peak_activity_multiplier }}
- 工作时段 + Work Hours {{ simulationConfig.time_config?.work_hours?.[0] }}:00-{{ simulationConfig.time_config?.work_hours?.slice(-1)[0] }}:00 ×{{ simulationConfig.time_config?.work_activity_multiplier }}
- 早间时段 + Morning Hours {{ simulationConfig.time_config?.morning_hours?.[0] }}:00-{{ simulationConfig.time_config?.morning_hours?.slice(-1)[0] }}:00 ×{{ simulationConfig.time_config?.morning_activity_multiplier }}
- 低谷时段 + Off-Peak Hours {{ simulationConfig.time_config?.off_peak_hours?.[0] }}:00-{{ simulationConfig.time_config?.off_peak_hours?.slice(-1)[0] }}:00 ×{{ simulationConfig.time_config?.off_peak_activity_multiplier }}
@@ -182,7 +182,7 @@
- Agent 配置 + Agent Config {{ simulationConfig.agent_configs?.length || 0 }} 个
@@ -205,7 +205,7 @@
- 活跃时段 + Active Hours
- 发帖/时 + Posts/hr {{ agent.posts_per_hour }}
- 评论/时 + Comments/hr {{ agent.comments_per_hour }}
- 响应延迟 + Response Delay {{ agent.response_delay_min }}-{{ agent.response_delay_max }}min
- 活跃度 + Activity {{ (agent.activity_level * 100).toFixed(0) }}%
- 情感倾向 + Sentiment Bias {{ agent.sentiment_bias > 0 ? '+' : '' }}{{ agent.sentiment_bias?.toFixed(1) }}
- 影响力 + Influence {{ agent.influence_weight?.toFixed(1) }}
@@ -267,59 +267,59 @@
- 推荐算法配置 + Recommendation Algorithm
- 平台 1:广场 / 信息流 + Platform 1: Feed / Timeline
- 时效权重 + Recency Weight {{ simulationConfig.twitter_config.recency_weight }}
- 热度权重 + Popularity Weight {{ simulationConfig.twitter_config.popularity_weight }}
- 相关性权重 + Relevance Weight {{ simulationConfig.twitter_config.relevance_weight }}
- 病毒阈值 + Viral Threshold {{ simulationConfig.twitter_config.viral_threshold }}
- 回音室强度 + Echo Chamber Strength {{ simulationConfig.twitter_config.echo_chamber_strength }}
- 平台 2:话题 / 社区 + Platform 2: Topics / Community
- 时效权重 + Recency Weight {{ simulationConfig.reddit_config.recency_weight }}
- 热度权重 + Popularity Weight {{ simulationConfig.reddit_config.popularity_weight }}
- 相关性权重 + Relevance Weight {{ simulationConfig.reddit_config.relevance_weight }}
- 病毒阈值 + Viral Threshold {{ simulationConfig.reddit_config.viral_threshold }}
- 回音室强度 + Echo Chamber Strength {{ simulationConfig.reddit_config.echo_chamber_strength }}
@@ -330,7 +330,7 @@
- LLM 配置推理 + LLM Config Reasoning
04 - 初始激活编排 + Initial Activation Orchestration
- 已完成 - 编排中 - 等待 + Completed + Orchestrating + Pending

POST /api/simulation/prepare

- 基于叙事方向,自动生成初始激活事件与热点话题,引导模拟世界的初始状态 + Automatically generates initial activation events and hot topics based on narrative direction to guide the initial state of the simulation world

@@ -380,14 +380,14 @@ - 叙事引导方向 + Narrative Direction

{{ simulationConfig.event_config.narrative_direction }}

- 初始热点话题 + Initial Hot Topics
# {{ topic }} @@ -397,7 +397,7 @@
- 初始激活序列 ({{ simulationConfig.event_config.initial_posts.length }}) + Initial Activation Sequence ({{ simulationConfig.event_config.initial_posts.length }})
@@ -423,29 +423,29 @@
05 - 准备完成 + Setup Complete
- 进行中 - 等待 + In Progress + Pending

POST /api/simulation/start

-

模拟环境已准备完成,可以开始运行模拟

+

Simulation environment is ready, you can now start running the simulation

- 模拟轮数设定 - MiroFish 自动规划推演现实 {{ simulationConfig?.time_config?.total_simulation_hours || '-' }} 小时,每轮代表现实 {{ simulationConfig?.time_config?.minutes_per_round || '-' }} 分钟时间流逝 + Simulation Rounds + MiroFish auto-plans {{ simulationConfig?.time_config?.total_simulation_hours || '-' }} real-world hours, each round represents {{ simulationConfig?.time_config?.minutes_per_round || '-' }} minutes of real time
@@ -454,10 +454,10 @@
{{ customMaxRounds }} - + rounds
- 若Agent规模为100:预计耗时约 {{ Math.round(customMaxRounds * 0.6) }} 分钟 + For 100 agents: estimated ~{{ Math.round(customMaxRounds * 0.6) }} min
@@ -478,7 +478,7 @@ :class="{ active: customMaxRounds === 40 }" @click="customMaxRounds = 40" :style="{ position: 'absolute', left: `calc(${(40 - 10) / (autoGeneratedRounds - 10) * 100}% - 30px)` }" - >40 (推荐) + >40 (Rec.) {{ autoGeneratedRounds }}
@@ -488,7 +488,7 @@
{{ autoGeneratedRounds }} - + rounds
@@ -497,11 +497,11 @@ - 若Agent规模为100:预计耗时 {{ Math.round(autoGeneratedRounds * 0.6) }} 分钟 + For 100 agents: estimated {{ Math.round(autoGeneratedRounds * 0.6) }} min
-

若首次运行,强烈建议切换至‘自定义模式’减少模拟轮数,以便快速预览效果并降低报错风险 ➝

+

First run? Strongly recommended to switch to ‘Custom’ mode and reduce rounds for a quick preview and lower error risk ➝

@@ -514,14 +514,14 @@ class="action-btn secondary" @click="$emit('go-back')" > - ← 返回图谱构建 + ← Back to Graph Build
@@ -547,32 +547,32 @@
@@ -129,7 +129,7 @@
- 正在生成{{ section.title }}... + Generating {{ section.title }}...
@@ -98,7 +98,7 @@ - 与Report Agent对话 + Chat with Report Agent
@@ -141,7 +141,7 @@ - 发送问卷调查到世界中 + Send Survey
@@ -155,7 +155,7 @@
R
Report Agent - Chat
-
报告生成智能体的快速对话版本,可调用 4 种专业工具,拥有MiroFish的完整记忆
+
Quick chat with Report Agent — 4 specialized tools, full MiroFish memory
-
InsightForge 深度归因
-
对齐现实世界种子数据与模拟环境状态,结合Global/Local Memory机制,提供跨时空的深度归因分析
+
InsightForge Deep Analysis
+
Aligns real-world seed data with simulation state, providing cross-temporal deep attribution analysis via Global/Local Memory
@@ -184,8 +184,8 @@
-
PanoramaSearch 全景追踪
-
基于图结构的广度遍历算法,重构事件传播路径,捕获全量信息流动的拓扑结构
+
PanoramaSearch Global Tracking
+
Graph-based breadth traversal algorithm reconstructing event propagation paths and information flow topology
@@ -195,8 +195,8 @@
-
QuickSearch 快速检索
-
基于 GraphRAG 的即时查询接口,优化索引效率,用于快速提取具体的节点属性与离散事实
+
QuickSearch Fast Query
+
GraphRAG-based instant query interface for fast extraction of node attributes and discrete facts
diff --git a/frontend/src/views/Home.vue b/frontend/src/views/Home.vue index afe01a0c4..30c202bb3 100644 --- a/frontend/src/views/Home.vue +++ b/frontend/src/views/Home.vue @@ -5,7 +5,7 @@ @@ -15,21 +15,21 @@
- 简洁通用的群体智能引擎 - / v0.1-预览版 + Simple Universal Swarm Intelligence Engine + / v0.1-Preview

- 上传任意报告
- 即刻推演未来 + Upload Any Report
+ Predict the Future

- 即使只有一段文字,MiroFish 也能基于其中的现实种子,全自动生成与之对应的至多百万级Agent构成的平行世界。通过上帝视角注入变量,在复杂的群体交互中寻找动态环境下的“局部最优解” + Even from a single paragraph, MiroFish can automatically generate a parallel world with up to million-scale Agents based on real-world seeds. Inject variables from a god's-eye view to find the “local optimum” in complex group interactions.

- 让未来在 Agent 群中预演,让决策在百战后胜出_ + Let the future be rehearsed in Agent swarms, and decisions forged through simulation_

@@ -53,65 +53,65 @@
- 系统状态 + System Status
- -

准备就绪

+ +

Ready

- 预测引擎待命中,可上传多份非结构化数据以初始化模拟序列 + Prediction engine on standby. Upload unstructured data to initialize the simulation sequence.

-
低成本
-
常规模拟平均5$/次
+
Low Cost
+
~$5 per simulation
-
高可用
-
最多百万级Agent模拟
+
Scalable
+
Up to million-scale Agents
- 工作流序列 + Workflow Sequence
01
-
图谱构建
-
现实种子提取 & 个体与群体记忆注入 & GraphRAG构建
+
Graph Build
+
Reality seed extraction & individual/group memory injection & GraphRAG build
02
-
环境搭建
-
实体关系抽取 & 人设生成 & 环境配置Agent注入仿真参数
+
Environment Setup
+
Entity-relation extraction & persona generation & environment config Agent injection
03
-
开始模拟
-
双平台并行模拟 & 自动解析预测需求 & 动态更新时序记忆
+
Run Simulation
+
Dual-platform parallel simulation & auto-parse prediction requirements & dynamic temporal memory updates
04
-
报告生成
-
ReportAgent拥有丰富的工具集与模拟后环境进行深度交互
+
Report Generation
+
ReportAgent uses a rich toolset for deep interaction with the post-simulation environment
05
-
深度互动
-
与模拟世界中的任意一位进行对话 & 与ReportAgent进行对话
+
Deep Interaction
+
Chat with any agent in the simulated world & interact with ReportAgent
@@ -124,8 +124,8 @@
- 01 / 现实种子 - 支持格式: PDF, MD, TXT + 01 / Reality Seed + Supported formats: PDF, MD, TXT
-
拖拽文件上传
-
或点击浏览文件系统
+
Drag & Drop Files
+
or click to browse
@@ -164,23 +164,23 @@
- 输入参数 + Input Parameters
- >_ 02 / 模拟提示词 + >_ 02 / Simulation Prompt
-
引擎: MiroFish-V1.0
+
Engine: MiroFish-V1.0
@@ -191,8 +191,8 @@ @click="startSimulation" :disabled="!canSubmit || loading" > - 启动引擎 - 初始化中... + Launch Engine + Initializing...
diff --git a/frontend/src/views/InteractionView.vue b/frontend/src/views/InteractionView.vue index b153590d7..a3c2686d4 100644 --- a/frontend/src/views/InteractionView.vue +++ b/frontend/src/views/InteractionView.vue @@ -15,7 +15,7 @@ :class="{ active: viewMode === mode }" @click="viewMode = mode" > - {{ { graph: '图谱', split: '双栏', workbench: '工作台' }[mode] }} + {{ { graph: 'Graph', split: 'Split', workbench: 'Workbench' }[mode] }}
@@ -23,7 +23,7 @@
Step 5/5 - 深度互动 + Deep Interaction
diff --git a/frontend/src/views/MainView.vue b/frontend/src/views/MainView.vue index 6ff299112..aae184ee5 100644 --- a/frontend/src/views/MainView.vue +++ b/frontend/src/views/MainView.vue @@ -15,7 +15,7 @@ :class="{ active: viewMode === mode }" @click="viewMode = mode" > - {{ { graph: '图谱', split: '双栏', workbench: '工作台' }[mode] }} + {{ { graph: 'Graph', split: 'Split', workbench: 'Workbench' }[mode] }}
@@ -91,7 +91,7 @@ const viewMode = ref('split') // graph | split | workbench // Step State const currentStep = ref(1) // 1: 图谱构建, 2: 环境搭建, 3: 开始模拟, 4: 报告生成, 5: 深度互动 -const stepNames = ['图谱构建', '环境搭建', '开始模拟', '报告生成', '深度互动'] +const stepNames = ['Graph Build', 'Env Setup', 'Simulation', 'Report', 'Interaction'] // Data State const currentProjectId = ref(route.params.projectId) diff --git a/frontend/src/views/Process.vue b/frontend/src/views/Process.vue index 2d2d3cc1a..07c886a2e 100644 --- a/frontend/src/views/Process.vue +++ b/frontend/src/views/Process.vue @@ -7,7 +7,7 @@ -

等待本体生成

-

生成完成后将自动开始构建图谱

+

Waiting for ontology generation

+

Graph build will start automatically once complete

@@ -200,8 +200,8 @@
-

图谱构建中

-

数据即将显示...

+

Building graph...

+

Data will appear shortly...

@@ -225,7 +225,7 @@
- 构建流程 + Build Process
@@ -234,7 +234,7 @@
01
-
本体生成
+
Ontology Generation
/api/graph/ontology/generate
@@ -244,15 +244,15 @@
-
接口说明
+
Description
- 上传文档后,LLM分析文档内容,自动生成适合舆论模拟的本体结构(实体类型 + 关系类型) + After uploading documents, the LLM analyzes the content and automatically generates an ontology structure (entity types + relation types) suitable for opinion simulation
-
生成进度
+
Generation Progress
{{ ontologyProgress.message }} @@ -261,7 +261,7 @@
-
生成的实体类型 ({{ projectData.ontology.entity_types?.length || 0 }})
+
Generated Entity Types ({{ projectData.ontology.entity_types?.length || 0 }})
-
生成的关系类型 ({{ projectData.ontology.relation_types?.length || 0 }})
+
Generated Relation Types ({{ projectData.ontology.relation_types?.length || 0 }})
{{ rel.target_type }}
- +{{ projectData.ontology.relation_types.length - 5 }} 更多关系... + +{{ projectData.ontology.relation_types.length - 5 }} more relations...
-
等待本体生成...
+
Waiting for ontology generation...
@@ -305,7 +305,7 @@
02
-
图谱构建
+
Graph Build
/api/graph/build
@@ -315,20 +315,20 @@
-
接口说明
+
Description
- 基于生成的本体,将文档分块后调用 Zep API 构建知识图谱,提取实体和关系 + Using the generated ontology, chunks the documents and calls the Zep API to build a knowledge graph, extracting entities and relations
-
等待本体生成完成...
+
Waiting for ontology to complete...
-
构建进度
+
Build Progress
@@ -339,19 +339,19 @@
-
构建结果
+
Build Results
{{ graphData.node_count }} - 实体节点 + Entity Nodes
{{ graphData.edge_count }} - 关系边 + Relation Edges
{{ entityTypes.length }} - 实体类型 + Entity Types
@@ -363,8 +363,8 @@
03
-
构建完成
-
准备进入下一步骤
+
Build Complete
+
Ready for next step
{{ getPhaseStatusText(2) }} @@ -375,7 +375,7 @@
@@ -385,23 +385,23 @@
- 项目信息 + Project Info
- 项目名称 + Project Name {{ projectData.name }}
- 项目ID + Project ID {{ projectData.project_id }}
- 图谱ID + Graph ID {{ projectData.graph_id }}
- 模拟需求 + Simulation Requirement {{ projectData.simulation_requirement || '-' }}
@@ -451,11 +451,11 @@ const statusClass = computed(() => { }) const statusText = computed(() => { - if (error.value) return '构建失败' - if (currentPhase.value >= 2) return '构建完成' - if (currentPhase.value === 1) return '图谱构建中' - if (currentPhase.value === 0) return '本体生成中' - return '初始化中' + if (error.value) return 'Build Failed' + if (currentPhase.value >= 2) return 'Build Complete' + if (currentPhase.value === 1) return 'Building Graph' + if (currentPhase.value === 0) return 'Generating Ontology' + return 'Initializing' }) const entityTypes = computed(() => { diff --git a/frontend/src/views/ReportView.vue b/frontend/src/views/ReportView.vue index 84a3e2a3f..09962bb6f 100644 --- a/frontend/src/views/ReportView.vue +++ b/frontend/src/views/ReportView.vue @@ -15,7 +15,7 @@ :class="{ active: viewMode === mode }" @click="viewMode = mode" > - {{ { graph: '图谱', split: '双栏', workbench: '工作台' }[mode] }} + {{ { graph: 'Graph', split: 'Split', workbench: 'Workbench' }[mode] }}
@@ -23,7 +23,7 @@
Step 4/5 - 报告生成 + Report Generation
diff --git a/frontend/src/views/SimulationRunView.vue b/frontend/src/views/SimulationRunView.vue index 14ebc5f9d..32a250dd1 100644 --- a/frontend/src/views/SimulationRunView.vue +++ b/frontend/src/views/SimulationRunView.vue @@ -15,7 +15,7 @@ :class="{ active: viewMode === mode }" @click="viewMode = mode" > - {{ { graph: '图谱', split: '双栏', workbench: '工作台' }[mode] }} + {{ { graph: 'Graph', split: 'Split', workbench: 'Workbench' }[mode] }}
@@ -23,7 +23,7 @@
Step 3/5 - 开始模拟 + Run Simulation
diff --git a/frontend/src/views/SimulationView.vue b/frontend/src/views/SimulationView.vue index 4b44b3972..4dbc52b67 100644 --- a/frontend/src/views/SimulationView.vue +++ b/frontend/src/views/SimulationView.vue @@ -15,7 +15,7 @@ :class="{ active: viewMode === mode }" @click="viewMode = mode" > - {{ { graph: '图谱', split: '双栏', workbench: '工作台' }[mode] }} + {{ { graph: 'Graph', split: 'Split', workbench: 'Workbench' }[mode] }}
@@ -23,7 +23,7 @@
Step 2/5 - 环境搭建 + Environment Setup
From 6f176e3d1ad3e9b656af0fe9e3fcf3146191d711 Mon Sep 17 00:00:00 2001 From: K1skakas Date: Sun, 22 Mar 2026 10:37:00 +0100 Subject: [PATCH 02/18] feat(i18n): add English base locale JSON --- backend/app/i18n/en.json | 89 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 89 insertions(+) create mode 100644 backend/app/i18n/en.json diff --git a/backend/app/i18n/en.json b/backend/app/i18n/en.json new file mode 100644 index 000000000..136554039 --- /dev/null +++ b/backend/app/i18n/en.json @@ -0,0 +1,89 @@ +{ + "nav": { + "github": "Visit our GitHub", + "mirofish": "MIROFISH" + }, + "home": { + "tag": "Simple Universal Swarm Intelligence Engine", + "version": "/ v0.1-Preview", + "title_line1": "Upload Any Report", + "title_line2": "Predict the Future", + "desc1": "Even from a single paragraph, MiroFish can automatically generate a parallel world with up to million-scale Agents based on real-world seeds. Inject variables from a god's-eye view to find the \"local optimum\" in complex group interactions.", + "slogan": "Let the future be rehearsed in Agent swarms, and decisions forged through simulation", + "status_label": "System Status", + "status_ready": "Ready", + "status_desc": "Prediction engine on standby. 
Upload unstructured data to initialize the simulation sequence.", + "metric_cost_value": "Low Cost", + "metric_cost_label": "~$5 per simulation", + "metric_scale_value": "Scalable", + "metric_scale_label": "Up to million-scale Agents", + "workflow_title": "Workflow Sequence", + "step1_title": "Graph Build", + "step1_desc": "Reality seed extraction & individual/group memory injection & GraphRAG build", + "step2_title": "Environment Setup", + "step2_desc": "Entity-relation extraction & persona generation & environment config Agent injection", + "step3_title": "Run Simulation", + "step3_desc": "Dual-platform parallel simulation & auto-parse prediction requirements & dynamic temporal memory updates", + "step4_title": "Report Generation", + "step4_desc": "ReportAgent uses a rich toolset for deep interaction with the post-simulation environment", + "step5_title": "Deep Interaction", + "step5_desc": "Chat with any agent in the simulated world & interact with ReportAgent", + "seed_label": "01 / Reality Seed", + "seed_formats": "Supported formats: PDF, MD, TXT", + "upload_title": "Drag & Drop Files", + "upload_hint": "or click to browse", + "input_params": "Input Parameters", + "prompt_label": ">_ 02 / Simulation Prompt", + "prompt_placeholder": "// Enter your simulation or prediction requirement in natural language (e.g. If a major company announces layoffs, what public sentiment trends would emerge?)", + "engine_badge": "Engine: MiroFish-V1.0", + "launch_btn": "Launch Engine", + "launching_btn": "Initializing..." 
+ }, + "history": { + "title": "History Database", + "empty": "No simulation history found.", + "status_completed": "Completed", + "status_running": "Running", + "status_failed": "Failed", + "view_btn": "View", + "delete_btn": "Delete" + }, + "graph": { + "building": "Building knowledge graph...", + "complete": "Graph build complete", + "error": "Graph build failed" + }, + "simulation": { + "preparing": "Preparing simulation...", + "running": "Simulation running", + "complete": "Simulation complete", + "failed": "Simulation failed", + "twitter_platform": "Twitter", + "reddit_platform": "Reddit", + "rounds": "Rounds", + "agents": "Agents", + "start_btn": "Start Simulation", + "stop_btn": "Stop" + }, + "report": { + "generating": "Generating report...", + "complete": "Report ready", + "download_btn": "Download", + "interact_btn": "Interact with ReportAgent" + }, + "interaction": { + "placeholder": "Ask a question...", + "send_btn": "Send", + "select_agent": "Select an agent", + "report_agent": "ReportAgent" + }, + "common": { + "loading": "Loading...", + "error": "An error occurred", + "retry": "Retry", + "back": "Back", + "next": "Next", + "cancel": "Cancel", + "confirm": "Confirm" + } +} From 7fa6ed5c5e45134bb14406f80d21a566267359c7 Mon Sep 17 00:00:00 2001 From: K1skakas Date: Sun, 22 Mar 2026 10:38:05 +0100 Subject: [PATCH 03/18] feat(i18n): add LLM-powered locale generator service --- backend/app/services/locale_generator.py | 73 ++++++++++++++++++++++++ 1 file changed, 73 insertions(+) create mode 100644 backend/app/services/locale_generator.py diff --git a/backend/app/services/locale_generator.py b/backend/app/services/locale_generator.py new file mode 100644 index 000000000..22aa1eeb3 --- /dev/null +++ b/backend/app/services/locale_generator.py @@ -0,0 +1,73 @@ +""" +Locale generator service. +Uses LLM to translate the English base locale into any target language. +Generated locale files are cached to disk. 
+""" + +import json +from pathlib import Path + +from ..utils.llm_client import LLMClient +from ..utils.logger import get_logger + +logger = get_logger(__name__) + +I18N_DIR = Path(__file__).parent.parent / "i18n" +BASE_LOCALE = "en" + + +def _load_base() -> dict: + path = I18N_DIR / f"{BASE_LOCALE}.json" + with open(path, "r", encoding="utf-8") as f: + return json.load(f) + + +def get_locale(lang: str) -> dict: + """ + Return locale dict for given language tag (e.g. 'hu', 'ja', 'fr'). + Loads from cache if available, otherwise generates via LLM and caches. + """ + # Normalize: 'hu-HU' -> 'hu' + lang = lang.split("-")[0].lower() + + if lang == BASE_LOCALE: + return _load_base() + + cache_path = I18N_DIR / f"{lang}.json" + if cache_path.exists(): + with open(cache_path, "r", encoding="utf-8") as f: + return json.load(f) + + logger.info(f"Generating locale for '{lang}' via LLM...") + locale = _generate(lang) + + # Ensure directory exists before writing + cache_path.parent.mkdir(parents=True, exist_ok=True) + cache_path.write_text(json.dumps(locale, ensure_ascii=False, indent=2), encoding="utf-8") + logger.info(f"Locale '{lang}' cached to {cache_path}") + + return locale + + +def _generate(lang: str) -> dict: + base = _load_base() + client = LLMClient() + + prompt = f"""You are a professional UI translator. Translate the following JSON locale file from English to the language with BCP 47 tag "{lang}". 
+ +Rules: +- Keep all JSON keys exactly as-is (do not translate keys) +- Translate only the string values +- Preserve placeholders like {{variable_name}} unchanged +- Keep technical terms like "MiroFish", "GraphRAG", "ReportAgent", "Agent" as-is +- Return only valid JSON, no markdown, no explanation + +English source: +{json.dumps(base, ensure_ascii=False, indent=2)}""" + + # Use chat_json() which handles JSON fence-stripping and parsing consistently + return client.chat_json( + messages=[{"role": "user", "content": prompt}], + temperature=0.2, + max_tokens=4096 + ) From 71db10354cee25b824d1526e3d1fc2860ff4143a Mon Sep 17 00:00:00 2001 From: K1skakas Date: Sun, 22 Mar 2026 10:40:26 +0100 Subject: [PATCH 04/18] fix(i18n): validate language code to prevent prompt injection --- backend/app/services/locale_generator.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/backend/app/services/locale_generator.py b/backend/app/services/locale_generator.py index 22aa1eeb3..b27ecf212 100644 --- a/backend/app/services/locale_generator.py +++ b/backend/app/services/locale_generator.py @@ -5,6 +5,7 @@ """ import json +import re from pathlib import Path from ..utils.llm_client import LLMClient @@ -30,6 +31,10 @@ def get_locale(lang: str) -> dict: # Normalize: 'hu-HU' -> 'hu' lang = lang.split("-")[0].lower() + # Validate: only standard BCP 47 primary language subtags (2-3 alpha chars) + if not re.match(r'^[a-z]{2,3}$', lang): + raise ValueError(f"Invalid language code: {lang!r}") + if lang == BASE_LOCALE: return _load_base() From c33fee4939f48efc01442f17fc30863e427c081c Mon Sep 17 00:00:00 2001 From: K1skakas Date: Sun, 22 Mar 2026 10:41:04 +0100 Subject: [PATCH 05/18] feat(i18n): add GET /api/locale/ endpoint --- backend/app/__init__.py | 3 ++- backend/app/api/__init__.py | 2 ++ backend/app/api/locale.py | 24 ++++++++++++++++++++++++ 3 files changed, 28 insertions(+), 1 deletion(-) create mode 100644 backend/app/api/locale.py diff --git a/backend/app/__init__.py 
b/backend/app/__init__.py index aba624bba..b8ae925d5 100644 --- a/backend/app/__init__.py +++ b/backend/app/__init__.py @@ -63,10 +63,11 @@ def log_response(response): return response # 注册蓝图 - from .api import graph_bp, simulation_bp, report_bp + from .api import graph_bp, simulation_bp, report_bp, locale_bp app.register_blueprint(graph_bp, url_prefix='/api/graph') app.register_blueprint(simulation_bp, url_prefix='/api/simulation') app.register_blueprint(report_bp, url_prefix='/api/report') + app.register_blueprint(locale_bp, url_prefix='/api/locale') # 健康检查 @app.route('/health') diff --git a/backend/app/api/__init__.py b/backend/app/api/__init__.py index ffda743a3..ef35be541 100644 --- a/backend/app/api/__init__.py +++ b/backend/app/api/__init__.py @@ -7,8 +7,10 @@ graph_bp = Blueprint('graph', __name__) simulation_bp = Blueprint('simulation', __name__) report_bp = Blueprint('report', __name__) +locale_bp = Blueprint('locale', __name__) from . import graph # noqa: E402, F401 from . import simulation # noqa: E402, F401 from . import report # noqa: E402, F401 +from . import locale # noqa: E402, F401 diff --git a/backend/app/api/locale.py b/backend/app/api/locale.py new file mode 100644 index 000000000..60bd4d710 --- /dev/null +++ b/backend/app/api/locale.py @@ -0,0 +1,24 @@ +""" +Locale API endpoint. +GET /api/locale/ — returns locale JSON for given language tag. +""" + +from flask import jsonify + +from . 
import locale_bp +from ..services.locale_generator import get_locale +from ..utils.logger import get_logger + +logger = get_logger(__name__) + + +@locale_bp.route("/", methods=["GET"]) +def get_locale_route(lang: str): + try: + data = get_locale(lang) + return jsonify({"success": True, "data": data}) + except ValueError as e: + return jsonify({"success": False, "error": str(e)}), 400 + except Exception as e: + logger.error(f"Failed to get locale '{lang}': {e}") + return jsonify({"success": False, "error": str(e)}), 500 From 9f0863358e7bbd6c9043fca4aaacab29681d752b Mon Sep 17 00:00:00 2001 From: K1skakas Date: Sun, 22 Mar 2026 10:42:09 +0100 Subject: [PATCH 06/18] feat(i18n): add language parameter to LLMClient.chat() and chat_json() --- backend/app/utils/llm_client.py | 30 +++++++++++++++++++++--------- 1 file changed, 21 insertions(+), 9 deletions(-) diff --git a/backend/app/utils/llm_client.py b/backend/app/utils/llm_client.py index 6c1a81f49..247524d3b 100644 --- a/backend/app/utils/llm_client.py +++ b/backend/app/utils/llm_client.py @@ -37,30 +37,39 @@ def chat( messages: List[Dict[str, str]], temperature: float = 0.7, max_tokens: int = 4096, - response_format: Optional[Dict] = None + response_format: Optional[Dict] = None, + language: Optional[str] = None ) -> str: """ 发送聊天请求 - + Args: messages: 消息列表 temperature: 温度参数 max_tokens: 最大token数 response_format: 响应格式(如JSON模式) - + language: 输出语言(BCP 47语言标签,如'zh-CN', 'fr'等) + Returns: 模型响应文本 """ + if language and language != "en": + lang_instruction = { + "role": "system", + "content": f"Generate all your output in the language with BCP 47 tag '{language}'. Do not use any other language." 
+ } + messages = [lang_instruction] + list(messages) + kwargs = { "model": self.model, "messages": messages, "temperature": temperature, "max_tokens": max_tokens, } - + if response_format: kwargs["response_format"] = response_format - + response = self.client.chat.completions.create(**kwargs) content = response.choices[0].message.content # 部分模型(如MiniMax M2.5)会在content中包含思考内容,需要移除 @@ -71,16 +80,18 @@ def chat_json( self, messages: List[Dict[str, str]], temperature: float = 0.3, - max_tokens: int = 4096 + max_tokens: int = 4096, + language: Optional[str] = None ) -> Dict[str, Any]: """ 发送聊天请求并返回JSON - + Args: messages: 消息列表 temperature: 温度参数 max_tokens: 最大token数 - + language: 输出语言(BCP 47语言标签,如'zh-CN', 'fr'等) + Returns: 解析后的JSON对象 """ @@ -88,7 +99,8 @@ def chat_json( messages=messages, temperature=temperature, max_tokens=max_tokens, - response_format={"type": "json_object"} + response_format={"type": "json_object"}, + language=language ) # 清理markdown代码块标记 cleaned_response = response.strip() From 48a6cdd006153042c9d84cbc14c6fa864bd724a7 Mon Sep 17 00:00:00 2001 From: K1skakas Date: Sun, 22 Mar 2026 10:43:53 +0100 Subject: [PATCH 07/18] feat(i18n): add locale middleware, capture language before thread spawn Co-Authored-By: Claude Sonnet 4.6 --- backend/app/__init__.py | 9 ++++++++- backend/app/api/graph.py | 5 ++++- backend/app/api/simulation.py | 5 ++++- 3 files changed, 16 insertions(+), 3 deletions(-) diff --git a/backend/app/__init__.py b/backend/app/__init__.py index b8ae925d5..87ee9565d 100644 --- a/backend/app/__init__.py +++ b/backend/app/__init__.py @@ -9,7 +9,7 @@ # 需要在所有其他导入之前设置 warnings.filterwarnings("ignore", message=".*resource_tracker.*") -from flask import Flask, request +from flask import Flask, request, g from flask_cors import CORS from .config import Config @@ -48,6 +48,13 @@ def create_app(config_class=Config): if should_log_startup: logger.info("已注册模拟进程清理函数") + # Locale middleware — normalize Accept-Language header to a 2-letter code + 
@app.before_request + def set_locale(): + # Normalize 'hu-HU,hu;q=0.9,en;q=0.8' -> 'hu' + accept_lang = request.headers.get("Accept-Language", "en") + g.locale = accept_lang.split(",")[0].split("-")[0].lower() + # 请求日志中间件 @app.before_request def log_request(): diff --git a/backend/app/api/graph.py b/backend/app/api/graph.py index 12ff1ba2d..812c06936 100644 --- a/backend/app/api/graph.py +++ b/backend/app/api/graph.py @@ -6,7 +6,7 @@ import os import traceback import threading -from flask import request, jsonify +from flask import request, jsonify, g from . import graph_bp from ..config import Config @@ -370,6 +370,9 @@ def build_graph(): project.graph_build_task_id = task_id ProjectManager.save_project(project) + # Capture locale before spawning thread — g is not accessible inside threads + locale = getattr(g, 'locale', 'en') + # 启动后台任务 def build_task(): build_logger = get_logger('mirofish.build') diff --git a/backend/app/api/simulation.py b/backend/app/api/simulation.py index 3a0f68168..6edaff48b 100644 --- a/backend/app/api/simulation.py +++ b/backend/app/api/simulation.py @@ -5,7 +5,7 @@ import os import traceback -from flask import request, jsonify, send_file +from flask import request, jsonify, send_file, g from . 
import simulation_bp from ..config import Config @@ -500,6 +500,9 @@ def prepare_simulation(): state.status = SimulationStatus.PREPARING manager._save_simulation_state(state) + # Capture locale before spawning thread — g is not accessible inside threads + locale = getattr(g, 'locale', 'en') + # 定义后台任务 def run_prepare(): try: From e9aee39d09751741c9bee505ebbadc691e7e002b Mon Sep 17 00:00:00 2001 From: K1skakas Date: Sun, 22 Mar 2026 10:51:44 +0100 Subject: [PATCH 08/18] feat(i18n): inject user language into all LLM service calls Co-Authored-By: Claude Sonnet 4.6 --- backend/app/api/graph.py | 4 +- backend/app/api/report.py | 19 ++++--- backend/app/api/simulation.py | 3 +- .../app/services/oasis_profile_generator.py | 34 +++++++---- backend/app/services/ontology_generator.py | 22 ++++--- backend/app/services/report_agent.py | 57 ++++++++++++------- .../services/simulation_config_generator.py | 26 +++++++-- backend/app/services/simulation_manager.py | 9 ++- 8 files changed, 119 insertions(+), 55 deletions(-) diff --git a/backend/app/api/graph.py b/backend/app/api/graph.py index 812c06936..0b3452c32 100644 --- a/backend/app/api/graph.py +++ b/backend/app/api/graph.py @@ -213,11 +213,13 @@ def generate_ontology(): # 生成本体 logger.info("调用 LLM 生成本体定义...") + locale = getattr(g, 'locale', 'en') generator = OntologyGenerator() ontology = generator.generate( document_texts=document_texts, simulation_requirement=simulation_requirement, - additional_context=additional_context if additional_context else None + additional_context=additional_context if additional_context else None, + language=locale ) # 保存本体到项目 diff --git a/backend/app/api/report.py b/backend/app/api/report.py index e05c73c39..49bcc8cd6 100644 --- a/backend/app/api/report.py +++ b/backend/app/api/report.py @@ -6,7 +6,7 @@ import os import traceback import threading -from flask import request, jsonify, send_file +from flask import request, jsonify, send_file, g from . 
import report_bp from ..config import Config @@ -120,6 +120,9 @@ def generate_report(): } ) + # Capture locale before spawning thread — g is not accessible inside threads + locale = getattr(g, 'locale', 'en') + # 定义后台任务 def run_generate(): try: @@ -129,14 +132,14 @@ def run_generate(): progress=0, message="初始化Report Agent..." ) - + # 创建Report Agent agent = ReportAgent( graph_id=graph_id, simulation_id=simulation_id, simulation_requirement=simulation_requirement ) - + # 进度回调 def progress_callback(stage, progress, message): task_manager.update_task( @@ -144,11 +147,12 @@ def progress_callback(stage, progress, message): progress=progress, message=f"[{stage}] {message}" ) - + # 生成报告(传入预先生成的 report_id) report = agent.generate_report( progress_callback=progress_callback, - report_id=report_id + report_id=report_id, + language=locale ) # 保存报告 @@ -542,8 +546,9 @@ def chat_with_report_agent(): simulation_id=simulation_id, simulation_requirement=simulation_requirement ) - - result = agent.chat(message=message, chat_history=chat_history) + + locale = getattr(g, 'locale', 'en') + result = agent.chat(message=message, chat_history=chat_history, language=locale) return jsonify({ "success": True, diff --git a/backend/app/api/simulation.py b/backend/app/api/simulation.py index 6edaff48b..a0b05aa18 100644 --- a/backend/app/api/simulation.py +++ b/backend/app/api/simulation.py @@ -585,7 +585,8 @@ def progress_callback(stage, progress, message, **kwargs): defined_entity_types=entity_types_list, use_llm_for_profiles=use_llm_for_profiles, progress_callback=progress_callback, - parallel_profile_count=parallel_profile_count + parallel_profile_count=parallel_profile_count, + language=locale ) # 任务完成 diff --git a/backend/app/services/oasis_profile_generator.py b/backend/app/services/oasis_profile_generator.py index 57836c539..b69b5bb1f 100644 --- a/backend/app/services/oasis_profile_generator.py +++ b/backend/app/services/oasis_profile_generator.py @@ -139,12 +139,19 @@ def to_dict(self) -> 
Dict[str, Any]: } +def _with_language(messages: list, language: str) -> list: + """Prepend language instruction to messages when not English.""" + if language and language != "en": + return [{"role": "system", "content": f"Generate all your output in the language with BCP 47 tag '{language}'. Do not use any other language."}] + list(messages) + return list(messages) + + class OasisProfileGenerator: """ OASIS Profile生成器 - + 将Zep图谱中的实体转换为OASIS模拟所需的Agent Profile - + 优化特性: 1. 调用Zep图谱检索功能获取更丰富的上下文 2. 生成非常详细的人设(包括基本信息、职业经历、性格特征、社交媒体行为等) @@ -209,10 +216,11 @@ def __init__( logger.warning(f"Zep客户端初始化失败: {e}") def generate_profile_from_entity( - self, - entity: EntityNode, + self, + entity: EntityNode, user_id: int, - use_llm: bool = True + use_llm: bool = True, + language: str = "en" ) -> OasisAgentProfile: """ 从Zep实体生成OASIS Agent Profile @@ -241,7 +249,8 @@ def generate_profile_from_entity( entity_type=entity_type, entity_summary=entity.summary, entity_attributes=entity.attributes, - context=context + context=context, + language=language ) else: # 使用规则生成基础人设 @@ -499,7 +508,8 @@ def _generate_profile_with_llm( entity_type: str, entity_summary: str, entity_attributes: Dict[str, Any], - context: str + context: str, + language: str = "en" ) -> Dict[str, Any]: """ 使用LLM生成非常详细的人设 @@ -528,10 +538,10 @@ def _generate_profile_with_llm( try: response = self.client.chat.completions.create( model=self.model_name, - messages=[ + messages=_with_language([ {"role": "system", "content": self._get_system_prompt(is_individual)}, {"role": "user", "content": prompt} - ], + ], language), response_format={"type": "json_object"}, temperature=0.7 - (attempt * 0.1) # 每次重试降低温度 # 不设置max_tokens,让LLM自由发挥 @@ -855,7 +865,8 @@ def generate_profiles_from_entities( graph_id: Optional[str] = None, parallel_count: int = 5, realtime_output_path: Optional[str] = None, - output_platform: str = "reddit" + output_platform: str = "reddit", + language: str = "en" ) -> List[OasisAgentProfile]: """ 批量从实体生成Agent 
Profile(支持并行生成) @@ -923,7 +934,8 @@ def generate_single_profile(idx: int, entity: EntityNode) -> tuple: profile = self.generate_profile_from_entity( entity=entity, user_id=idx, - use_llm=use_llm + use_llm=use_llm, + language=language ) # 实时输出生成的人设到控制台和日志 diff --git a/backend/app/services/ontology_generator.py b/backend/app/services/ontology_generator.py index 2d3e39bd8..f8996e86a 100644 --- a/backend/app/services/ontology_generator.py +++ b/backend/app/services/ontology_generator.py @@ -168,36 +168,44 @@ def generate( self, document_texts: List[str], simulation_requirement: str, - additional_context: Optional[str] = None + additional_context: Optional[str] = None, + language: str = "en" ) -> Dict[str, Any]: """ 生成本体定义 - + Args: document_texts: 文档文本列表 simulation_requirement: 模拟需求描述 additional_context: 额外上下文 - + language: BCP 47 language tag for LLM output (default: "en") + Returns: 本体定义(entity_types, edge_types等) """ # 构建用户消息 user_message = self._build_user_message( - document_texts, + document_texts, simulation_requirement, additional_context ) - + messages = [ {"role": "system", "content": ONTOLOGY_SYSTEM_PROMPT}, {"role": "user", "content": user_message} ] - + + # Prepend language instruction when not English + if language and language != "en": + messages = [ + {"role": "system", "content": f"Generate all your output in the language with BCP 47 tag '{language}'. 
Do not use any other language."} + ] + messages + # 调用LLM result = self.llm_client.chat_json( messages=messages, temperature=0.3, - max_tokens=4096 + max_tokens=8192 ) # 验证和后处理 diff --git a/backend/app/services/report_agent.py b/backend/app/services/report_agent.py index 02ca5bdc2..bd7d45f5a 100644 --- a/backend/app/services/report_agent.py +++ b/backend/app/services/report_agent.py @@ -901,18 +901,21 @@ def __init__( self.graph_id = graph_id self.simulation_id = simulation_id self.simulation_requirement = simulation_requirement - + self.llm = llm_client or LLMClient() self.zep_tools = zep_tools or ZepToolsService() - + + # Active language for LLM calls (set per-call by generate_report / chat) + self.language: str = "en" + # 工具定义 self.tools = self._define_tools() - + # 日志记录器(在 generate_report 中初始化) self.report_logger: Optional[ReportLogger] = None # 控制台日志记录器(在 generate_report 中初始化) self.console_logger: Optional[ReportConsoleLogger] = None - + logger.info(f"ReportAgent 初始化完成: graph_id={graph_id}, simulation_id={simulation_id}") def _define_tools(self) -> Dict[str, Dict[str, Any]]: @@ -952,6 +955,14 @@ def _define_tools(self) -> Dict[str, Dict[str, Any]]: } } + def _with_language(self, messages: list) -> list: + """Prepend language instruction to messages when not English.""" + if self.language and self.language != "en": + return [ + {"role": "system", "content": f"Generate all your output in the language with BCP 47 tag '{self.language}'. 
Do not use any other language."} + ] + list(messages) + return list(messages) + def _execute_tool(self, tool_name: str, parameters: Dict[str, Any], report_context: str = "") -> str: """ 执行工具调用 @@ -1174,10 +1185,10 @@ def plan_outline( try: response = self.llm.chat_json( - messages=[ + messages=self._with_language([ {"role": "system", "content": system_prompt}, {"role": "user", "content": user_prompt} - ], + ]), temperature=0.3 ) @@ -1275,11 +1286,11 @@ def _generate_section_react( section_title=section.title, ) - messages = [ + messages = self._with_language([ {"role": "system", "content": system_prompt}, {"role": "user", "content": user_prompt} - ] - + ]) + # ReACT循环 tool_calls_count = 0 max_iterations = 5 # 最大迭代轮数 @@ -1530,9 +1541,10 @@ def _generate_section_react( return final_answer def generate_report( - self, + self, progress_callback: Optional[Callable[[str, int, str], None]] = None, - report_id: Optional[str] = None + report_id: Optional[str] = None, + language: str = "en" ) -> Report: """ 生成完整报告(分章节实时输出) @@ -1556,7 +1568,10 @@ def generate_report( Report: 完整报告 """ import uuid - + + # Store language for use by _with_language() in LLM calls + self.language = language + # 如果没有传入 report_id,则自动生成 if not report_id: report_id = f"report_{uuid.uuid4().hex[:12]}" @@ -1764,9 +1779,10 @@ def generate_report( return report def chat( - self, + self, message: str, - chat_history: List[Dict[str, str]] = None + chat_history: List[Dict[str, str]] = None, + language: str = "en" ) -> Dict[str, Any]: """ 与Report Agent对话 @@ -1785,7 +1801,10 @@ def chat( } """ logger.info(f"Report Agent对话: {message[:50]}...") - + + # Store language for _with_language() calls + self.language = language + chat_history = chat_history or [] # 获取已生成的报告内容 @@ -1807,15 +1826,15 @@ def chat( ) # 构建消息 - messages = [{"role": "system", "content": system_prompt}] - + messages = self._with_language([{"role": "system", "content": system_prompt}]) + # 添加历史对话 for h in chat_history[-10:]: # 限制历史长度 
messages.append(h) - + # 添加用户消息 messages.append({ - "role": "user", + "role": "user", "content": message }) diff --git a/backend/app/services/simulation_config_generator.py b/backend/app/services/simulation_config_generator.py index cc362508b..cfa5b85bf 100644 --- a/backend/app/services/simulation_config_generator.py +++ b/backend/app/services/simulation_config_generator.py @@ -196,10 +196,17 @@ def to_json(self, indent: int = 2) -> str: return json.dumps(self.to_dict(), ensure_ascii=False, indent=indent) +def _with_language(messages: list, language: str) -> list: + """Prepend language instruction to messages when not English.""" + if language and language != "en": + return [{"role": "system", "content": f"Generate all your output in the language with BCP 47 tag '{language}'. Do not use any other language."}] + list(messages) + return list(messages) + + class SimulationConfigGenerator: """ 模拟配置智能生成器 - + 使用LLM分析模拟需求、文档内容、图谱实体信息, 自动生成最佳的模拟参数配置 @@ -250,6 +257,7 @@ def generate_config( enable_twitter: bool = True, enable_reddit: bool = True, progress_callback: Optional[Callable[[int, int, str], None]] = None, + language: str = "en" ) -> SimulationParameters: """ 智能生成完整的模拟配置(分步生成) @@ -269,7 +277,10 @@ def generate_config( SimulationParameters: 完整的模拟参数 """ logger.info(f"开始智能生成模拟配置: simulation_id={simulation_id}, 实体数={len(entities)}") - + + # Store language so _call_llm_with_retry can use it + self._language = language + # 计算总步骤数 num_batches = math.ceil(len(entities) / self.AGENTS_PER_BATCH) total_steps = 3 + num_batches # 时间配置 + 事件配置 + N批Agent + 平台配置 @@ -433,18 +444,21 @@ def _summarize_entities(self, entities: List[EntityNode]) -> str: def _call_llm_with_retry(self, prompt: str, system_prompt: str) -> Dict[str, Any]: """带重试的LLM调用,包含JSON修复逻辑""" import re - + + # Use language stored by generate_config (falls back to "en") + language = getattr(self, '_language', 'en') + max_attempts = 3 last_error = None - + for attempt in range(max_attempts): try: response = 
self.client.chat.completions.create( model=self.model_name, - messages=[ + messages=_with_language([ {"role": "system", "content": system_prompt}, {"role": "user", "content": prompt} - ], + ], language), response_format={"type": "json_object"}, temperature=0.7 - (attempt * 0.1) # 每次重试降低温度 # 不设置max_tokens,让LLM自由发挥 diff --git a/backend/app/services/simulation_manager.py b/backend/app/services/simulation_manager.py index 96c496fd4..06b1d8ad1 100644 --- a/backend/app/services/simulation_manager.py +++ b/backend/app/services/simulation_manager.py @@ -234,7 +234,8 @@ def prepare_simulation( defined_entity_types: Optional[List[str]] = None, use_llm_for_profiles: bool = True, progress_callback: Optional[callable] = None, - parallel_profile_count: int = 3 + parallel_profile_count: int = 3, + language: str = "en" ) -> SimulationState: """ 准备模拟环境(全程自动化) @@ -342,7 +343,8 @@ def profile_progress(current, total, msg): graph_id=state.graph_id, # 传入graph_id用于Zep检索 parallel_count=parallel_profile_count, # 并行生成数量 realtime_output_path=realtime_output_path, # 实时保存路径 - output_platform=realtime_platform # 输出格式 + output_platform=realtime_platform, # 输出格式 + language=language ) state.profiles_count = len(profiles) @@ -407,7 +409,8 @@ def profile_progress(current, total, msg): document_text=document_text, entities=filtered.entities, enable_twitter=state.enable_twitter, - enable_reddit=state.enable_reddit + enable_reddit=state.enable_reddit, + language=language ) if progress_callback: From 38ad71a5ac5cbe525c22d3ac892be734b01ba1e8 Mon Sep 17 00:00:00 2001 From: K1skakas Date: Sun, 22 Mar 2026 10:53:59 +0100 Subject: [PATCH 09/18] feat(i18n): install vue-i18n, auto-detect language, send Accept-Language header --- frontend/package-lock.json | 70 +++++++++++++++++++++++++++++++++++--- frontend/package.json | 1 + frontend/src/api/index.js | 2 ++ frontend/src/i18n/index.js | 35 +++++++++++++++++++ frontend/src/main.js | 13 ++++--- 5 files changed, 113 insertions(+), 8 deletions(-) create mode 
100644 frontend/src/i18n/index.js diff --git a/frontend/package-lock.json b/frontend/package-lock.json index 8c4fa710d..e29dbb914 100644 --- a/frontend/package-lock.json +++ b/frontend/package-lock.json @@ -11,6 +11,7 @@ "axios": "^1.13.2", "d3": "^7.9.0", "vue": "^3.5.24", + "vue-i18n": "^9.14.5", "vue-router": "^4.6.3" }, "devDependencies": { @@ -506,6 +507,50 @@ "node": ">=18" } }, + "node_modules/@intlify/core-base": { + "version": "9.14.5", + "resolved": "https://registry.npmjs.org/@intlify/core-base/-/core-base-9.14.5.tgz", + "integrity": "sha512-5ah5FqZG4pOoHjkvs8mjtv+gPKYU0zCISaYNjBNNqYiaITxW8ZtVih3GS/oTOqN8d9/mDLyrjD46GBApNxmlsA==", + "license": "MIT", + "dependencies": { + "@intlify/message-compiler": "9.14.5", + "@intlify/shared": "9.14.5" + }, + "engines": { + "node": ">= 16" + }, + "funding": { + "url": "https://github.com/sponsors/kazupon" + } + }, + "node_modules/@intlify/message-compiler": { + "version": "9.14.5", + "resolved": "https://registry.npmjs.org/@intlify/message-compiler/-/message-compiler-9.14.5.tgz", + "integrity": "sha512-IHzgEu61/YIpQV5Pc3aRWScDcnFKWvQA9kigcINcCBXN8mbW+vk9SK+lDxA6STzKQsVJxUPg9ACC52pKKo3SVQ==", + "license": "MIT", + "dependencies": { + "@intlify/shared": "9.14.5", + "source-map-js": "^1.0.2" + }, + "engines": { + "node": ">= 16" + }, + "funding": { + "url": "https://github.com/sponsors/kazupon" + } + }, + "node_modules/@intlify/shared": { + "version": "9.14.5", + "resolved": "https://registry.npmjs.org/@intlify/shared/-/shared-9.14.5.tgz", + "integrity": "sha512-9gB+E53BYuAEMhbCAxVgG38EZrk59sxBtv3jSizNL2hEWlgjBjAw1AwpLHtNaeda12pe6W20OGEa0TwuMSRbyQ==", + "license": "MIT", + "engines": { + "node": ">= 16" + }, + "funding": { + "url": "https://github.com/sponsors/kazupon" + } + }, "node_modules/@jridgewell/sourcemap-codec": { "version": "1.5.5", "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.5.5.tgz", @@ -1331,7 +1376,6 @@ "resolved": 
"https://registry.npmjs.org/d3-selection/-/d3-selection-3.0.0.tgz", "integrity": "sha512-fmTRWbNMmsmWq6xJV8D19U/gw/bwrHfNXxrIN+HfZgnzqTHp9jOmKMhsTUjXOJnZOdZY9Q28y4yebKzqDKlxlQ==", "license": "ISC", - "peer": true, "engines": { "node": ">=12" } @@ -1809,7 +1853,6 @@ "integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==", "dev": true, "license": "MIT", - "peer": true, "engines": { "node": ">=12" }, @@ -1943,7 +1986,6 @@ "integrity": "sha512-ITcnkFeR3+fI8P1wMgItjGrR10170d8auB4EpMLPqmx6uxElH3a/hHGQabSHKdqd4FXWO1nFIp9rRn7JQ34ACQ==", "dev": true, "license": "MIT", - "peer": true, "dependencies": { "esbuild": "^0.25.0", "fdir": "^6.5.0", @@ -2018,7 +2060,6 @@ "resolved": "https://registry.npmjs.org/vue/-/vue-3.5.25.tgz", "integrity": "sha512-YLVdgv2K13WJ6n+kD5owehKtEXwdwXuj2TTyJMsO7pSeKw2bfRNZGjhB7YzrpbMYj5b5QsUebHpOqR3R3ziy/g==", "license": "MIT", - "peer": true, "dependencies": { "@vue/compiler-dom": "3.5.25", "@vue/compiler-sfc": "3.5.25", @@ -2035,6 +2076,27 @@ } } }, + "node_modules/vue-i18n": { + "version": "9.14.5", + "resolved": "https://registry.npmjs.org/vue-i18n/-/vue-i18n-9.14.5.tgz", + "integrity": "sha512-0jQ9Em3ymWngyiIkj0+c/k7WgaPO+TNzjKSNq9BvBQaKJECqn9cd9fL4tkDhB5G1QBskGl9YxxbDAhgbFtpe2g==", + "deprecated": "v9 and v10 no longer supported. please migrate to v11. 
about maintenance status, see https://vue-i18n.intlify.dev/guide/maintenance.html", + "license": "MIT", + "dependencies": { + "@intlify/core-base": "9.14.5", + "@intlify/shared": "9.14.5", + "@vue/devtools-api": "^6.5.0" + }, + "engines": { + "node": ">= 16" + }, + "funding": { + "url": "https://github.com/sponsors/kazupon" + }, + "peerDependencies": { + "vue": "^3.0.0" + } + }, "node_modules/vue-router": { "version": "4.6.3", "resolved": "https://registry.npmjs.org/vue-router/-/vue-router-4.6.3.tgz", diff --git a/frontend/package.json b/frontend/package.json index f7e995a14..7e05e8918 100644 --- a/frontend/package.json +++ b/frontend/package.json @@ -12,6 +12,7 @@ "axios": "^1.13.2", "d3": "^7.9.0", "vue": "^3.5.24", + "vue-i18n": "^9.14.5", "vue-router": "^4.6.3" }, "devDependencies": { diff --git a/frontend/src/api/index.js b/frontend/src/api/index.js index e2d9465b2..37f7de16f 100644 --- a/frontend/src/api/index.js +++ b/frontend/src/api/index.js @@ -1,4 +1,5 @@ import axios from 'axios' +import { detectLanguage } from '../i18n/index.js' // 创建axios实例 const service = axios.create({ @@ -12,6 +13,7 @@ const service = axios.create({ // 请求拦截器 service.interceptors.request.use( config => { + config.headers['Accept-Language'] = detectLanguage() return config }, error => { diff --git a/frontend/src/i18n/index.js b/frontend/src/i18n/index.js new file mode 100644 index 000000000..7bf678a8b --- /dev/null +++ b/frontend/src/i18n/index.js @@ -0,0 +1,35 @@ +import { createI18n } from 'vue-i18n' +import api from '../api/index.js' + +// Detect browser language, normalize to short tag (e.g. 'hu-HU' -> 'hu') +export function detectLanguage() { + const raw = navigator.language || navigator.languages?.[0] || 'en' + return raw.split('-')[0].toLowerCase() +} + +export async function setupI18n() { + const lang = detectLanguage() + + // Note: api.get() returns response.data directly (the Axios interceptor unwraps it). 
+ // So res is already { success: true, data: {...locale...} }, and res.data is the locale object. + let messages = {} + try { + const res = await api.get(`/api/locale/${lang}`) + messages = res.data + } catch (e) { + console.warn(`Could not load locale '${lang}', falling back to English`) + try { + const res = await api.get('/api/locale/en') + messages = res.data + } catch { + messages = {} + } + } + + return createI18n({ + legacy: false, + locale: lang, + fallbackLocale: 'en', + messages: { [lang]: messages } + }) +} diff --git a/frontend/src/main.js b/frontend/src/main.js index c8e37b03b..bf4dd6d58 100644 --- a/frontend/src/main.js +++ b/frontend/src/main.js @@ -1,9 +1,14 @@ import { createApp } from 'vue' import App from './App.vue' import router from './router' +import { setupI18n } from './i18n/index.js' -const app = createApp(App) +async function bootstrap() { + const i18n = await setupI18n() + const app = createApp(App) + app.use(router) + app.use(i18n) + app.mount('#app') +} -app.use(router) - -app.mount('#app') +bootstrap() From 287a8f6229541e551804dcfe2abb506c743067ca Mon Sep 17 00:00:00 2001 From: K1skakas Date: Sun, 22 Mar 2026 10:55:51 +0100 Subject: [PATCH 10/18] feat(i18n): replace hardcoded strings in Home.vue with $t() keys --- frontend/src/views/Home.vue | 83 +++++++++++++++++++------------------ 1 file changed, 43 insertions(+), 40 deletions(-) diff --git a/frontend/src/views/Home.vue b/frontend/src/views/Home.vue index 30c202bb3..f268a1574 100644 --- a/frontend/src/views/Home.vue +++ b/frontend/src/views/Home.vue @@ -2,10 +2,10 @@
@@ -15,21 +15,21 @@
- Simple Universal Swarm Intelligence Engine - / v0.1-Preview + {{ $t('home.tag') }} + {{ $t('home.version') }}
- +

- Upload Any Report
- Predict the Future + {{ $t('home.title_line1') }}
+ {{ $t('home.title_line2') }}

-
+

- Even from a single paragraph, MiroFish can automatically generate a parallel world with up to million-scale Agents based on real-world seeds. Inject variables from a god's-eye view to find the ”local optimum” in complex group interactions. + {{ $t('home.desc1') }}

-

- Let the future be rehearsed in Agent swarms, and decisions forged through simulation_ +

+ {{ $t('home.slogan') }}_

@@ -53,65 +53,65 @@
- System Status + {{ $t('home.status_label') }}
-

Ready

+

{{ $t('home.status_ready') }}

- Prediction engine on standby. Upload unstructured data to initialize the simulation sequence. + {{ $t('home.status_desc') }}

-
Low Cost
-
~$5 per simulation
+
{{ $t('home.metric_cost_value') }}
+
{{ $t('home.metric_cost_label') }}
-
Scalable
-
Up to million-scale Agents
+
{{ $t('home.metric_scale_value') }}
+
{{ $t('home.metric_scale_label') }}
- Workflow Sequence + {{ $t('home.workflow_title') }}
01
-
Graph Build
-
Reality seed extraction & individual/group memory injection & GraphRAG build
+
{{ $t('home.step1_title') }}
+
{{ $t('home.step1_desc') }}
02
-
Environment Setup
-
Entity-relation extraction & persona generation & environment config Agent injection
+
{{ $t('home.step2_title') }}
+
{{ $t('home.step2_desc') }}
03
-
Run Simulation
-
Dual-platform parallel simulation & auto-parse prediction requirements & dynamic temporal memory updates
+
{{ $t('home.step3_title') }}
+
{{ $t('home.step3_desc') }}
04
-
Report Generation
-
ReportAgent uses a rich toolset for deep interaction with the post-simulation environment
+
{{ $t('home.step4_title') }}
+
{{ $t('home.step4_desc') }}
05
-
Deep Interaction
-
Chat with any agent in the simulated world & interact with ReportAgent
+
{{ $t('home.step5_title') }}
+
{{ $t('home.step5_desc') }}
@@ -124,8 +124,8 @@
- 01 / Reality Seed - Supported formats: PDF, MD, TXT + {{ $t('home.seed_label') }} + {{ $t('home.seed_formats') }}
-
Drag & Drop Files
-
or click to browse
+
{{ $t('home.upload_title') }}
+
{{ $t('home.upload_hint') }}
@@ -164,35 +164,35 @@
- Input Parameters + {{ $t('home.input_params') }}
- >_ 02 / Simulation Prompt + {{ $t('home.prompt_label') }}
-
Engine: MiroFish-V1.0
+
{{ $t('home.engine_badge') }}
-
@@ -209,8 +209,11 @@ diff --git a/frontend/src/views/MainView.vue b/frontend/src/views/MainView.vue index 8567f3a42..1b4cb67c0 100644 --- a/frontend/src/views/MainView.vue +++ b/frontend/src/views/MainView.vue @@ -22,7 +22,7 @@
- Step {{ currentStep }}/5 + {{ $t('workflow.step_counter', { current: currentStep, total: 5 }) }} {{ stepNames[currentStep - 1] }}
@@ -167,11 +167,11 @@ const toggleMaximize = (target) => { const handleNextStep = (params = {}) => { if (currentStep.value < 5) { currentStep.value++ - addLog(`进入 Step ${currentStep.value}: ${stepNames.value[currentStep.value - 1]}`) - + addLog(`Entering step ${currentStep.value}: ${stepNames.value[currentStep.value - 1]}`) + // 如果是从 Step 2 进入 Step 3,记录模拟轮数配置 if (currentStep.value === 3 && params.maxRounds) { - addLog(`自定义模拟轮数: ${params.maxRounds} 轮`) + addLog(`Custom simulation rounds: ${params.maxRounds}`) } } } @@ -179,7 +179,7 @@ const handleNextStep = (params = {}) => { const handleGoBack = () => { if (currentStep.value > 1) { currentStep.value-- - addLog(`返回 Step ${currentStep.value}: ${stepNames.value[currentStep.value - 1]}`) + addLog(`Returning to step ${currentStep.value}: ${stepNames.value[currentStep.value - 1]}`) } } diff --git a/frontend/src/views/ReportView.vue b/frontend/src/views/ReportView.vue index a6d3918aa..4e2fe1d91 100644 --- a/frontend/src/views/ReportView.vue +++ b/frontend/src/views/ReportView.vue @@ -22,7 +22,7 @@
- Step 4/5 + {{ $t('workflow.step_counter', { current: 4, total: 5 }) }} {{ $t('steps.report_generation') }}
diff --git a/frontend/src/views/SimulationRunView.vue b/frontend/src/views/SimulationRunView.vue index 0c5cfafac..3d5001de6 100644 --- a/frontend/src/views/SimulationRunView.vue +++ b/frontend/src/views/SimulationRunView.vue @@ -22,7 +22,7 @@
- Step 3/5 + {{ $t('workflow.step_counter', { current: 3, total: 5 }) }} {{ $t('steps.run_simulation') }}
diff --git a/frontend/src/views/SimulationView.vue b/frontend/src/views/SimulationView.vue index 79498c265..742de731e 100644 --- a/frontend/src/views/SimulationView.vue +++ b/frontend/src/views/SimulationView.vue @@ -22,7 +22,7 @@
- Step 2/5 + {{ $t('workflow.step_counter', { current: 2, total: 5 }) }} {{ $t('steps.env_setup') }}
@@ -148,13 +148,13 @@ const handleGoBack = () => { } const handleNextStep = (params = {}) => { - addLog('进入 Step 3: 开始模拟') - + addLog('Entering step 3: Run Simulation') + // 记录模拟轮数配置 if (params.maxRounds) { - addLog(`自定义模拟轮数: ${params.maxRounds} 轮`) + addLog(`Custom simulation rounds: ${params.maxRounds}`) } else { - addLog('使用自动配置的模拟轮数') + addLog('Using auto-configured simulation rounds') } // 构建路由参数 @@ -239,20 +239,20 @@ const forceStopSimulation = async () => { const loadSimulationData = async () => { try { - addLog(`加载模拟数据: ${currentSimulationId.value}`) - + addLog(`Loading simulation data: ${currentSimulationId.value}`) + // 获取 simulation 信息 const simRes = await getSimulation(currentSimulationId.value) if (simRes.success && simRes.data) { const simData = simRes.data - + // 获取 project 信息 if (simData.project_id) { const projRes = await getProject(simData.project_id) if (projRes.success && projRes.data) { projectData.value = projRes.data - addLog(`项目加载成功: ${projRes.data.project_id}`) - + addLog(`Project loaded successfully: ${projRes.data.project_id}`) + // 获取 graph 数据 if (projRes.data.graph_id) { await loadGraph(projRes.data.graph_id) @@ -260,10 +260,10 @@ const loadSimulationData = async () => { } } } else { - addLog(`加载模拟数据失败: ${simRes.error || '未知错误'}`) + addLog(`Failed to load simulation data: ${simRes.error || 'Unknown error'}`) } } catch (err) { - addLog(`加载异常: ${err.message}`) + addLog(`Load error: ${err.message}`) } } @@ -273,10 +273,10 @@ const loadGraph = async (graphId) => { const res = await getGraphData(graphId) if (res.success) { graphData.value = res.data - addLog('图谱数据加载成功') + addLog('Graph data loaded successfully') } } catch (err) { - addLog(`图谱加载失败: ${err.message}`) + addLog(`Graph load failed: ${err.message}`) } finally { graphLoading.value = false } @@ -289,11 +289,11 @@ const refreshGraph = () => { } onMounted(async () => { - addLog('SimulationView 初始化') - + addLog('SimulationView initialized') + // 检查并关闭正在运行的模拟(用户从 Step 3 返回时) await 
checkAndStopRunningSimulation() - + // 加载模拟数据 loadSimulationData() }) From 29c7e1e537fa0110947cb82d990be37df60cca20 Mon Sep 17 00:00:00 2001 From: K1skakas Date: Sun, 22 Mar 2026 11:14:54 +0100 Subject: [PATCH 13/18] feat(i18n): replace hardcoded strings in all component files with $t() keys - Step1GraphBuild: ontology/graph/build sections, badge labels, stats, log panel header - Step2EnvSetup: all phase badges, persona/config sections, addLog calls translated to English - Step3Simulation: report button, event counter, waiting state, monitor label, addLog calls - Step4Report: prediction report tag, waiting/complete labels, console output, deep interaction btn - Step5Interaction: interactive tools, survey section, chat placeholders, all Chinese UI strings - HistoryDatabase: simulation history header, formatRounds(), getSimulationTitle() - GraphPanel: panel title, refresh, loading/waiting states, legend, edge labels toggle, detail panel - en.json: added 50+ new keys under graph, simulation, report, interaction, history, env, graph_panel sections Co-Authored-By: Claude Sonnet 4.6 --- backend/app/i18n/en.json | 85 ++++++++- frontend/src/components/GraphPanel.vue | 21 ++- frontend/src/components/HistoryDatabase.vue | 10 +- frontend/src/components/Step1GraphBuild.vue | 54 +++--- frontend/src/components/Step2EnvSetup.vue | 184 +++++++++---------- frontend/src/components/Step3Simulation.vue | 83 ++++----- frontend/src/components/Step4Report.vue | 16 +- frontend/src/components/Step5Interaction.vue | 117 ++++++------ 8 files changed, 323 insertions(+), 247 deletions(-) diff --git a/backend/app/i18n/en.json b/backend/app/i18n/en.json index ee0769bc0..fb5c25bf6 100644 --- a/backend/app/i18n/en.json +++ b/backend/app/i18n/en.json @@ -46,12 +46,31 @@ "status_running": "Running", "status_failed": "Failed", "view_btn": "View", - "delete_btn": "Delete" + "delete_btn": "Delete", + "simulation_history": "Simulation History", + "not_started": "Not started", + "unnamed": "Unnamed 
Simulation" }, "graph": { "building": "Building knowledge graph...", "complete": "Graph build complete", - "error": "Graph build failed" + "error": "Graph build failed", + "ontology_generation": "Ontology Generation", + "graphrag_build": "GraphRAG Build", + "build_complete": "Build Complete", + "enter_env_setup": "Enter Environment Setup ➝", + "creating": "Creating...", + "analyzing": "Analyzing documents...", + "entity_nodes": "Entity Nodes", + "relation_edges": "Relation Edges", + "schema_types": "Schema Types", + "system_dashboard": "SYSTEM DASHBOARD", + "proceed_desc": "Graph build complete. Proceed to the next step to set up the simulation environment.", + "generated_entity_types": "GENERATED ENTITY TYPES", + "generated_relation_types": "GENERATED RELATION TYPES", + "attributes": "ATTRIBUTES", + "examples": "EXAMPLES", + "connections": "CONNECTIONS" }, "simulation": { "preparing": "Preparing simulation...", @@ -63,19 +82,46 @@ "rounds": "Rounds", "agents": "Agents", "start_btn": "Start Simulation", - "stop_btn": "Stop" + "stop_btn": "Stop", + "elapsed_time": "Elapsed Time", + "waiting": "Waiting for agent actions...", + "generate_report": "Generate Report", + "starting": "Starting...", + "system_monitor": "SIMULATION MONITOR", + "total_events": "TOTAL EVENTS" }, "report": { "generating": "Generating report...", "complete": "Report ready", "download_btn": "Download", - "interact_btn": "Interact with ReportAgent" + "interact_btn": "Interact with ReportAgent", + "waiting": "Waiting for Report Agent...", + "deep_interaction": "Deep Interaction", + "prediction_report": "Prediction Report", + "console_output": "CONSOLE OUTPUT", + "report_complete": "Report Generation Complete" }, "interaction": { "placeholder": "Ask a question...", "send_btn": "Send", "select_agent": "Select an agent", - "report_agent": "ReportAgent" + "report_agent": "ReportAgent", + "chat_report_agent": "Chat with Report Agent", + "chat_any_agent": "Chat with any agent", + "send_survey": "Send 
Survey", + "interactive_tools": "Interactive Tools", + "select_agent_dropdown": "Select Agent", + "survey_select_targets": "Select Survey Targets", + "survey_selected": "Selected {count} / {total}", + "survey_question_label": "Survey Question", + "survey_submit_btn": "Send Survey", + "survey_results": "Survey Results", + "survey_replies": "{count} replies", + "select_all": "Select All", + "clear_selection": "Clear", + "report_agent_full_name": "Report Agent - Chat", + "unknown_profession": "Unknown Profession", + "bio_label": "Bio" }, "common": { "loading": "Loading...", @@ -112,5 +158,34 @@ "initializing": "Initializing", "building_graph": "Building Graph", "generating_ontology": "Generating Ontology" + }, + "env": { + "sim_instance_init": "Simulation Instance Init", + "gen_agent_personas": "Generate Agent Personas", + "gen_dual_platform_config": "Generate Dual-Platform Config", + "initial_activation": "Initial Activation Orchestration", + "setup_complete": "Setup Complete", + "back_to_graph": "Back to Graph Build", + "start_dual_world": "Start Dual-World Simulation ➝", + "narrative_direction": "Narrative Direction", + "initial_hot_topics": "Initial Hot Topics", + "current_agents": "Current Agents", + "expected_total": "Expected Total", + "current_topics": "Current Topics", + "system_dashboard": "SYSTEM DASHBOARD", + "orchestrating": "Orchestrating" + }, + "graph_panel": { + "title": "Graph Relationship Visualization", + "refresh": "Refresh", + "loading": "Loading graph data...", + "waiting": "Waiting for ontology generation...", + "entity_types": "Entity Types", + "show_edge_labels": "Show Edge Labels", + "node_details": "Node Details", + "relationship": "Relationship", + "memory_updating": "GraphRAG memory updating in real-time", + "updating": "Updating in real-time...", + "processing_hint": "Some content still processing, consider refreshing manually" } } diff --git a/frontend/src/components/GraphPanel.vue b/frontend/src/components/GraphPanel.vue index 
7d7bb2428..4f9107585 100644 --- a/frontend/src/components/GraphPanel.vue +++ b/frontend/src/components/GraphPanel.vue @@ -1,12 +1,12 @@