diff --git a/backend/app/api/graph.py b/backend/app/api/graph.py index 12ff1ba2d..759ff48b0 100644 --- a/backend/app/api/graph.py +++ b/backend/app/api/graph.py @@ -15,6 +15,7 @@ from ..services.text_processor import TextProcessor from ..utils.file_parser import FileParser from ..utils.logger import get_logger +from ..utils.locale import t, get_locale, set_locale from ..models.task import TaskManager, TaskStatus from ..models.project import ProjectManager, ProjectStatus @@ -42,9 +43,9 @@ def get_project(project_id: str): if not project: return jsonify({ "success": False, - "error": f"项目不存在: {project_id}" + "error": t('api.projectNotFound', id=project_id) }), 404 - + return jsonify({ "success": True, "data": project.to_dict() @@ -76,12 +77,12 @@ def delete_project(project_id: str): if not success: return jsonify({ "success": False, - "error": f"项目不存在或删除失败: {project_id}" + "error": t('api.projectDeleteFailed', id=project_id) }), 404 - + return jsonify({ "success": True, - "message": f"项目已删除: {project_id}" + "message": t('api.projectDeleted', id=project_id) }) @@ -95,9 +96,9 @@ def reset_project(project_id: str): if not project: return jsonify({ "success": False, - "error": f"项目不存在: {project_id}" + "error": t('api.projectNotFound', id=project_id) }), 404 - + # 重置到本体已生成状态 if project.ontology: project.status = ProjectStatus.ONTOLOGY_GENERATED @@ -111,7 +112,7 @@ def reset_project(project_id: str): return jsonify({ "success": True, - "message": f"项目已重置: {project_id}", + "message": t('api.projectReset', id=project_id), "data": project.to_dict() }) @@ -160,7 +161,7 @@ def generate_ontology(): if not simulation_requirement: return jsonify({ "success": False, - "error": "请提供模拟需求描述 (simulation_requirement)" + "error": t('api.requireSimulationRequirement') }), 400 # 获取上传的文件 @@ -168,7 +169,7 @@ def generate_ontology(): if not uploaded_files or all(not f.filename for f in uploaded_files): return jsonify({ "success": False, - "error": "请至少上传一个文档文件" + "error": 
t('api.requireFileUpload') }), 400 # 创建项目 @@ -203,7 +204,7 @@ def generate_ontology(): ProjectManager.delete_project(project.project_id) return jsonify({ "success": False, - "error": "没有成功处理任何文档,请检查文件格式" + "error": t('api.noDocProcessed') }), 400 # 保存提取的文本 @@ -285,12 +286,12 @@ def build_graph(): # 检查配置 errors = [] if not Config.ZEP_API_KEY: - errors.append("ZEP_API_KEY未配置") + errors.append(t('api.zepApiKeyMissing')) if errors: logger.error(f"配置错误: {errors}") return jsonify({ "success": False, - "error": "配置错误: " + "; ".join(errors) + "error": t('api.configError', details="; ".join(errors)) }), 500 # 解析请求 @@ -301,7 +302,7 @@ def build_graph(): if not project_id: return jsonify({ "success": False, - "error": "请提供 project_id" + "error": t('api.requireProjectId') }), 400 # 获取项目 @@ -309,22 +310,22 @@ def build_graph(): if not project: return jsonify({ "success": False, - "error": f"项目不存在: {project_id}" + "error": t('api.projectNotFound', id=project_id) }), 404 - + # 检查项目状态 force = data.get('force', False) # 强制重新构建 if project.status == ProjectStatus.CREATED: return jsonify({ "success": False, - "error": "项目尚未生成本体,请先调用 /ontology/generate" + "error": t('api.ontologyNotGenerated') }), 400 if project.status == ProjectStatus.GRAPH_BUILDING and not force: return jsonify({ "success": False, - "error": "图谱正在构建中,请勿重复提交。如需强制重建,请添加 force: true", + "error": t('api.graphBuilding'), "task_id": project.graph_build_task_id }), 400 @@ -349,7 +350,7 @@ def build_graph(): if not text: return jsonify({ "success": False, - "error": "未找到提取的文本内容" + "error": t('api.textNotFound') }), 400 # 获取本体 @@ -357,7 +358,7 @@ def build_graph(): if not ontology: return jsonify({ "success": False, - "error": "未找到本体定义" + "error": t('api.ontologyNotFound') }), 400 # 创建异步任务 @@ -370,15 +371,19 @@ def build_graph(): project.graph_build_task_id = task_id ProjectManager.save_project(project) + # Capture locale before spawning background thread + current_locale = get_locale() + # 启动后台任务 def build_task(): + 
set_locale(current_locale) build_logger = get_logger('mirofish.build') try: build_logger.info(f"[{task_id}] 开始构建图谱...") task_manager.update_task( task_id, status=TaskStatus.PROCESSING, - message="初始化图谱构建服务..." + message=t('progress.initGraphService') ) # 创建图谱构建服务 @@ -387,7 +392,7 @@ def build_task(): # 分块 task_manager.update_task( task_id, - message="文本分块中...", + message=t('progress.textChunking'), progress=5 ) chunks = TextProcessor.split_text( @@ -400,7 +405,7 @@ def build_task(): # 创建图谱 task_manager.update_task( task_id, - message="创建Zep图谱...", + message=t('progress.creatingZepGraph'), progress=10 ) graph_id = builder.create_graph(name=graph_name) @@ -412,7 +417,7 @@ def build_task(): # 设置本体 task_manager.update_task( task_id, - message="设置本体定义...", + message=t('progress.settingOntology'), progress=15 ) builder.set_ontology(graph_id, ontology) @@ -428,7 +433,7 @@ def add_progress_callback(msg, progress_ratio): task_manager.update_task( task_id, - message=f"开始添加 {total_chunks} 个文本块...", + message=t('progress.addingChunks', count=total_chunks), progress=15 ) @@ -442,7 +447,7 @@ def add_progress_callback(msg, progress_ratio): # 等待Zep处理完成(查询每个episode的processed状态) task_manager.update_task( task_id, - message="等待Zep处理数据...", + message=t('progress.waitingZepProcess'), progress=55 ) @@ -459,7 +464,7 @@ def wait_progress_callback(msg, progress_ratio): # 获取图谱数据 task_manager.update_task( task_id, - message="获取图谱数据...", + message=t('progress.fetchingGraphData'), progress=95 ) graph_data = builder.get_graph_data(graph_id) @@ -476,7 +481,7 @@ def wait_progress_callback(msg, progress_ratio): task_manager.update_task( task_id, status=TaskStatus.COMPLETED, - message="图谱构建完成", + message=t('progress.graphBuildComplete'), progress=100, result={ "project_id": project_id, @@ -499,7 +504,7 @@ def wait_progress_callback(msg, progress_ratio): task_manager.update_task( task_id, status=TaskStatus.FAILED, - message=f"构建失败: {str(e)}", + message=t('progress.buildFailed', error=str(e)), 
error=traceback.format_exc() ) @@ -512,7 +517,7 @@ def wait_progress_callback(msg, progress_ratio): "data": { "project_id": project_id, "task_id": task_id, - "message": "图谱构建任务已启动,请通过 /task/{task_id} 查询进度" + "message": t('api.graphBuildStarted', taskId=task_id) } }) @@ -536,7 +541,7 @@ def get_task(task_id: str): if not task: return jsonify({ "success": False, - "error": f"任务不存在: {task_id}" + "error": t('api.taskNotFound', id=task_id) }), 404 return jsonify({ @@ -570,7 +575,7 @@ def get_graph_data(graph_id: str): if not Config.ZEP_API_KEY: return jsonify({ "success": False, - "error": "ZEP_API_KEY未配置" + "error": t('api.zepApiKeyMissing') }), 500 builder = GraphBuilderService(api_key=Config.ZEP_API_KEY) @@ -598,7 +603,7 @@ def delete_graph(graph_id: str): if not Config.ZEP_API_KEY: return jsonify({ "success": False, - "error": "ZEP_API_KEY未配置" + "error": t('api.zepApiKeyMissing') }), 500 builder = GraphBuilderService(api_key=Config.ZEP_API_KEY) @@ -606,7 +611,7 @@ def delete_graph(graph_id: str): return jsonify({ "success": True, - "message": f"图谱已删除: {graph_id}" + "message": t('api.graphDeleted', id=graph_id) }) except Exception as e: diff --git a/backend/app/api/report.py b/backend/app/api/report.py index e05c73c39..d7f2a4d03 100644 --- a/backend/app/api/report.py +++ b/backend/app/api/report.py @@ -15,6 +15,7 @@ from ..models.project import ProjectManager from ..models.task import TaskManager, TaskStatus from ..utils.logger import get_logger +from ..utils.locale import t, get_locale, set_locale logger = get_logger('mirofish.api.report') @@ -53,9 +54,9 @@ def generate_report(): if not simulation_id: return jsonify({ "success": False, - "error": "请提供 simulation_id" + "error": t('api.requireSimulationId') }), 400 - + force_regenerate = data.get('force_regenerate', False) # 获取模拟信息 @@ -65,9 +66,9 @@ def generate_report(): if not state: return jsonify({ "success": False, - "error": f"模拟不存在: {simulation_id}" + "error": t('api.simulationNotFound', id=simulation_id) }), 
404 - + # 检查是否已有报告 if not force_regenerate: existing_report = ReportManager.get_report_by_simulation(simulation_id) @@ -78,7 +79,7 @@ def generate_report(): "simulation_id": simulation_id, "report_id": existing_report.report_id, "status": "completed", - "message": "报告已存在", + "message": t('api.reportAlreadyExists'), "already_generated": True } }) @@ -88,21 +89,21 @@ def generate_report(): if not project: return jsonify({ "success": False, - "error": f"项目不存在: {state.project_id}" + "error": t('api.projectNotFound', id=state.project_id) }), 404 graph_id = state.graph_id or project.graph_id if not graph_id: return jsonify({ "success": False, - "error": "缺少图谱ID,请确保已构建图谱" + "error": t('api.missingGraphIdEnsure') }), 400 simulation_requirement = project.simulation_requirement if not simulation_requirement: return jsonify({ "success": False, - "error": "缺少模拟需求描述" + "error": t('api.missingSimRequirement') }), 400 # 提前生成 report_id,以便立即返回给前端 @@ -120,14 +121,18 @@ def generate_report(): } ) + # Capture locale before spawning background thread + current_locale = get_locale() + # 定义后台任务 def run_generate(): + set_locale(current_locale) try: task_manager.update_task( task_id, status=TaskStatus.PROCESSING, progress=0, - message="初始化Report Agent..." 
+ message=t('api.initReportAgent') ) # 创建Report Agent @@ -164,7 +169,7 @@ def progress_callback(stage, progress, message): } ) else: - task_manager.fail_task(task_id, report.error or "报告生成失败") + task_manager.fail_task(task_id, report.error or t('api.reportGenerateFailed')) except Exception as e: logger.error(f"报告生成失败: {str(e)}") @@ -181,7 +186,7 @@ def progress_callback(stage, progress, message): "report_id": report_id, "task_id": task_id, "status": "generating", - "message": "报告生成任务已启动,请通过 /api/report/generate/status 查询进度", + "message": t('api.reportGenerateStarted'), "already_generated": False } }) @@ -234,7 +239,7 @@ def get_generate_status(): "report_id": existing_report.report_id, "status": "completed", "progress": 100, - "message": "报告已生成", + "message": t('api.reportGenerated'), "already_completed": True } }) @@ -242,7 +247,7 @@ def get_generate_status(): if not task_id: return jsonify({ "success": False, - "error": "请提供 task_id 或 simulation_id" + "error": t('api.requireTaskOrSimId') }), 400 task_manager = TaskManager() @@ -251,7 +256,7 @@ def get_generate_status(): if not task: return jsonify({ "success": False, - "error": f"任务不存在: {task_id}" + "error": t('api.taskNotFound', id=task_id) }), 404 return jsonify({ @@ -294,7 +299,7 @@ def get_report(report_id: str): if not report: return jsonify({ "success": False, - "error": f"报告不存在: {report_id}" + "error": t('api.reportNotFound', id=report_id) }), 404 return jsonify({ @@ -331,7 +336,7 @@ def get_report_by_simulation(simulation_id: str): if not report: return jsonify({ "success": False, - "error": f"该模拟暂无报告: {simulation_id}", + "error": t('api.noReportForSim', id=simulation_id), "has_report": False }), 404 @@ -403,7 +408,7 @@ def download_report(report_id: str): if not report: return jsonify({ "success": False, - "error": f"报告不存在: {report_id}" + "error": t('api.reportNotFound', id=report_id) }), 404 md_path = ReportManager._get_report_markdown_path(report_id) @@ -445,12 +450,12 @@ def delete_report(report_id: 
str): if not success: return jsonify({ "success": False, - "error": f"报告不存在: {report_id}" + "error": t('api.reportNotFound', id=report_id) }), 404 return jsonify({ "success": True, - "message": f"报告已删除: {report_id}" + "message": t('api.reportDeleted', id=report_id) }) except Exception as e: @@ -501,13 +506,13 @@ def chat_with_report_agent(): if not simulation_id: return jsonify({ "success": False, - "error": "请提供 simulation_id" + "error": t('api.requireSimulationId') }), 400 - + if not message: return jsonify({ "success": False, - "error": "请提供 message" + "error": t('api.requireMessage') }), 400 # 获取模拟和项目信息 @@ -517,21 +522,21 @@ def chat_with_report_agent(): if not state: return jsonify({ "success": False, - "error": f"模拟不存在: {simulation_id}" + "error": t('api.simulationNotFound', id=simulation_id) }), 404 - + project = ProjectManager.get_project(state.project_id) if not project: return jsonify({ "success": False, - "error": f"项目不存在: {state.project_id}" + "error": t('api.projectNotFound', id=state.project_id) }), 404 graph_id = state.graph_id or project.graph_id if not graph_id: return jsonify({ "success": False, - "error": "缺少图谱ID" + "error": t('api.missingGraphId') }), 400 simulation_requirement = project.simulation_requirement or "" @@ -585,7 +590,7 @@ def get_report_progress(report_id: str): if not progress: return jsonify({ "success": False, - "error": f"报告不存在或进度信息不可用: {report_id}" + "error": t('api.reportProgressNotAvail', id=report_id) }), 404 return jsonify({ @@ -673,7 +678,7 @@ def get_single_section(report_id: str, section_index: int): if not os.path.exists(section_path): return jsonify({ "success": False, - "error": f"章节不存在: section_{section_index:02d}.md" + "error": t('api.sectionNotFound', index=f"{section_index:02d}") }), 404 with open(section_path, 'r', encoding='utf-8') as f: @@ -949,7 +954,7 @@ def search_graph_tool(): if not graph_id or not query: return jsonify({ "success": False, - "error": "请提供 graph_id 和 query" + "error": 
t('api.requireGraphIdAndQuery') }), 400 from ..services.zep_tools import ZepToolsService @@ -993,7 +998,7 @@ def get_graph_statistics_tool(): if not graph_id: return jsonify({ "success": False, - "error": "请提供 graph_id" + "error": t('api.requireGraphId') }), 400 from ..services.zep_tools import ZepToolsService diff --git a/backend/app/api/simulation.py b/backend/app/api/simulation.py index 3a0f68168..3a8e1e3fc 100644 --- a/backend/app/api/simulation.py +++ b/backend/app/api/simulation.py @@ -14,6 +14,7 @@ from ..services.simulation_manager import SimulationManager, SimulationStatus from ..services.simulation_runner import SimulationRunner, RunnerStatus from ..utils.logger import get_logger +from ..utils.locale import t, get_locale, set_locale from ..models.project import ProjectManager logger = get_logger('mirofish.api.simulation') @@ -59,7 +60,7 @@ def get_graph_entities(graph_id: str): if not Config.ZEP_API_KEY: return jsonify({ "success": False, - "error": "ZEP_API_KEY未配置" + "error": t('api.zepApiKeyMissing') }), 500 entity_types_str = request.args.get('entity_types', '') @@ -96,7 +97,7 @@ def get_entity_detail(graph_id: str, entity_uuid: str): if not Config.ZEP_API_KEY: return jsonify({ "success": False, - "error": "ZEP_API_KEY未配置" + "error": t('api.zepApiKeyMissing') }), 500 reader = ZepEntityReader() @@ -105,7 +106,7 @@ def get_entity_detail(graph_id: str, entity_uuid: str): if not entity: return jsonify({ "success": False, - "error": f"实体不存在: {entity_uuid}" + "error": t('api.entityNotFound', id=entity_uuid) }), 404 return jsonify({ @@ -129,7 +130,7 @@ def get_entities_by_type(graph_id: str, entity_type: str): if not Config.ZEP_API_KEY: return jsonify({ "success": False, - "error": "ZEP_API_KEY未配置" + "error": t('api.zepApiKeyMissing') }), 500 enrich = request.args.get('enrich', 'true').lower() == 'true' @@ -197,21 +198,21 @@ def create_simulation(): if not project_id: return jsonify({ "success": False, - "error": "请提供 project_id" + "error": 
t('api.requireProjectId') }), 400 project = ProjectManager.get_project(project_id) if not project: return jsonify({ "success": False, - "error": f"项目不存在: {project_id}" + "error": t('api.projectNotFound', id=project_id) }), 404 graph_id = data.get('graph_id') or project.graph_id if not graph_id: return jsonify({ "success": False, - "error": "项目尚未构建图谱,请先调用 /api/graph/build" + "error": t('api.graphNotBuilt') }), 400 manager = SimulationManager() @@ -408,7 +409,7 @@ def prepare_simulation(): if not simulation_id: return jsonify({ "success": False, - "error": "请提供 simulation_id" + "error": t('api.requireSimulationId') }), 400 manager = SimulationManager() @@ -417,7 +418,7 @@ def prepare_simulation(): if not state: return jsonify({ "success": False, - "error": f"模拟不存在: {simulation_id}" + "error": t('api.simulationNotFound', id=simulation_id) }), 404 # 检查是否强制重新生成 @@ -436,7 +437,7 @@ def prepare_simulation(): "data": { "simulation_id": simulation_id, "status": "ready", - "message": "已有完成的准备工作,无需重复生成", + "message": t('api.alreadyPrepared'), "already_prepared": True, "prepare_info": prepare_info } @@ -449,7 +450,7 @@ def prepare_simulation(): if not project: return jsonify({ "success": False, - "error": f"项目不存在: {state.project_id}" + "error": t('api.projectNotFound', id=state.project_id) }), 404 # 获取模拟需求 @@ -457,7 +458,7 @@ def prepare_simulation(): if not simulation_requirement: return jsonify({ "success": False, - "error": "项目缺少模拟需求描述 (simulation_requirement)" + "error": t('api.projectMissingRequirement') }), 400 # 获取文档文本 @@ -500,14 +501,18 @@ def prepare_simulation(): state.status = SimulationStatus.PREPARING manager._save_simulation_state(state) + # Capture locale before spawning background thread + current_locale = get_locale() + # 定义后台任务 def run_prepare(): + set_locale(current_locale) try: task_manager.update_task( task_id, status=TaskStatus.PROCESSING, progress=0, - message="开始准备模拟环境..." 
+ message=t('progress.startPreparingEnv') ) # 准备模拟(带进度回调) @@ -528,10 +533,10 @@ def progress_callback(stage, progress, message, **kwargs): # 构建详细进度信息 stage_names = { - "reading": "读取图谱实体", - "generating_profiles": "生成Agent人设", - "generating_config": "生成模拟配置", - "copying_scripts": "准备模拟脚本" + "reading": t('progress.readingGraphEntities'), + "generating_profiles": t('progress.generatingProfiles'), + "generating_config": t('progress.generatingSimConfig'), + "copying_scripts": t('progress.preparingScripts') } stage_index = list(stage_weights.keys()).index(stage) + 1 if stage in stage_weights else 1 @@ -612,7 +617,7 @@ def progress_callback(stage, progress, message, **kwargs): "simulation_id": simulation_id, "task_id": task_id, "status": "preparing", - "message": "准备任务已启动,请通过 /api/simulation/prepare/status 查询进度", + "message": t('api.prepareStarted'), "already_prepared": False, "expected_entities_count": state.entities_count, # 预期的Agent总数 "entity_types": state.entity_types # 实体类型列表 @@ -680,7 +685,7 @@ def get_prepare_status(): "simulation_id": simulation_id, "status": "ready", "progress": 100, - "message": "已有完成的准备工作", + "message": t('api.alreadyPrepared'), "already_prepared": True, "prepare_info": prepare_info } @@ -696,13 +701,13 @@ def get_prepare_status(): "simulation_id": simulation_id, "status": "not_started", "progress": 0, - "message": "尚未开始准备,请调用 /api/simulation/prepare 开始", + "message": t('api.notStartedPrepare'), "already_prepared": False } }) return jsonify({ "success": False, - "error": "请提供 task_id 或 simulation_id" + "error": t('api.requireTaskOrSimId') }), 400 task_manager = TaskManager() @@ -720,7 +725,7 @@ def get_prepare_status(): "task_id": task_id, "status": "ready", "progress": 100, - "message": "任务已完成(准备工作已存在)", + "message": t('api.taskCompletedPrepared'), "already_prepared": True, "prepare_info": prepare_info } @@ -728,7 +733,7 @@ def get_prepare_status(): return jsonify({ "success": False, - "error": f"任务不存在: {task_id}" + "error": 
t('api.taskNotFound', id=task_id) }), 404 task_dict = task.to_dict() @@ -757,7 +762,7 @@ def get_simulation(simulation_id: str): if not state: return jsonify({ "success": False, - "error": f"模拟不存在: {simulation_id}" + "error": t('api.simulationNotFound', id=simulation_id) }), 404 result = state.to_dict() @@ -1061,7 +1066,7 @@ def get_simulation_profiles_realtime(simulation_id: str): if not os.path.exists(sim_dir): return jsonify({ "success": False, - "error": f"模拟不存在: {simulation_id}" + "error": t('api.simulationNotFound', id=simulation_id) }), 404 # 确定文件路径 @@ -1164,7 +1169,7 @@ def get_simulation_config_realtime(simulation_id: str): if not os.path.exists(sim_dir): return jsonify({ "success": False, - "error": f"模拟不存在: {simulation_id}" + "error": t('api.simulationNotFound', id=simulation_id) }), 404 # 配置文件路径 @@ -1269,7 +1274,7 @@ def get_simulation_config(simulation_id: str): if not config: return jsonify({ "success": False, - "error": f"模拟配置不存在,请先调用 /prepare 接口" + "error": t('api.configNotFound') }), 404 return jsonify({ @@ -1297,7 +1302,7 @@ def download_simulation_config(simulation_id: str): if not os.path.exists(config_path): return jsonify({ "success": False, - "error": "配置文件不存在,请先调用 /prepare 接口" + "error": t('api.configFileNotFound') }), 404 return send_file( @@ -1341,7 +1346,7 @@ def download_simulation_script(script_name: str): if script_name not in allowed_scripts: return jsonify({ "success": False, - "error": f"未知脚本: {script_name},可选: {allowed_scripts}" + "error": t('api.unknownScript', name=script_name, allowed=allowed_scripts) }), 400 script_path = os.path.join(scripts_dir, script_name) @@ -1349,7 +1354,7 @@ def download_simulation_script(script_name: str): if not os.path.exists(script_path): return jsonify({ "success": False, - "error": f"脚本文件不存在: {script_name}" + "error": t('api.scriptFileNotFound', name=script_name) }), 404 return send_file( @@ -1389,7 +1394,7 @@ def generate_profiles(): if not graph_id: return jsonify({ "success": False, - "error": 
"请提供 graph_id" + "error": t('api.requireGraphId') }), 400 entity_types = data.get('entity_types') @@ -1406,7 +1411,7 @@ def generate_profiles(): if filtered.filtered_count == 0: return jsonify({ "success": False, - "error": "没有找到符合条件的实体" + "error": t('api.noMatchingEntities') }), 400 generator = OasisProfileGenerator() @@ -1491,7 +1496,7 @@ def start_simulation(): if not simulation_id: return jsonify({ "success": False, - "error": "请提供 simulation_id" + "error": t('api.requireSimulationId') }), 400 platform = data.get('platform', 'parallel') @@ -1506,18 +1511,18 @@ def start_simulation(): if max_rounds <= 0: return jsonify({ "success": False, - "error": "max_rounds 必须是正整数" + "error": t('api.maxRoundsPositive') }), 400 except (ValueError, TypeError): return jsonify({ "success": False, - "error": "max_rounds 必须是有效的整数" + "error": t('api.maxRoundsInvalid') }), 400 if platform not in ['twitter', 'reddit', 'parallel']: return jsonify({ "success": False, - "error": f"无效的平台类型: {platform},可选: twitter/reddit/parallel" + "error": t('api.invalidPlatform', platform=platform) }), 400 # 检查模拟是否已准备好 @@ -1527,7 +1532,7 @@ def start_simulation(): if not state: return jsonify({ "success": False, - "error": f"模拟不存在: {simulation_id}" + "error": t('api.simulationNotFound', id=simulation_id) }), 404 force_restarted = False @@ -1554,7 +1559,7 @@ def start_simulation(): else: return jsonify({ "success": False, - "error": f"模拟正在运行中,请先调用 /stop 接口停止,或使用 force=true 强制重新开始" + "error": t('api.simRunningForceHint') }), 400 # 如果是强制模式,清理运行日志 @@ -1573,7 +1578,7 @@ def start_simulation(): # 准备工作未完成 return jsonify({ "success": False, - "error": f"模拟未准备好,当前状态: {state.status.value},请先调用 /prepare 接口" + "error": t('api.simNotReady', status=state.status.value) }), 400 # 获取图谱ID(用于图谱记忆更新) @@ -1590,7 +1595,7 @@ def start_simulation(): if not graph_id: return jsonify({ "success": False, - "error": "启用图谱记忆更新需要有效的 graph_id,请确保项目已构建图谱" + "error": t('api.graphIdRequiredForMemory') }), 400 logger.info(f"启用图谱记忆更新: 
simulation_id={simulation_id}, graph_id={graph_id}") @@ -1663,7 +1668,7 @@ def stop_simulation(): if not simulation_id: return jsonify({ "success": False, - "error": "请提供 simulation_id" + "error": t('api.requireSimulationId') }), 400 run_state = SimulationRunner.stop_simulation(simulation_id) @@ -2011,7 +2016,7 @@ def get_simulation_posts(simulation_id: str): "platform": platform, "count": 0, "posts": [], - "message": "数据库不存在,模拟可能尚未运行" + "message": t('api.dbNotExist') } }) @@ -2197,33 +2202,33 @@ def interview_agent(): if not simulation_id: return jsonify({ "success": False, - "error": "请提供 simulation_id" + "error": t('api.requireSimulationId') }), 400 if agent_id is None: return jsonify({ "success": False, - "error": "请提供 agent_id" + "error": t('api.requireAgentId') }), 400 if not prompt: return jsonify({ "success": False, - "error": "请提供 prompt(采访问题)" + "error": t('api.requirePrompt') }), 400 # 验证platform参数 if platform and platform not in ("twitter", "reddit"): return jsonify({ "success": False, - "error": "platform 参数只能是 'twitter' 或 'reddit'" + "error": t('api.invalidInterviewPlatform') }), 400 # 检查环境状态 if not SimulationRunner.check_env_alive(simulation_id): return jsonify({ "success": False, - "error": "模拟环境未运行或已关闭。请确保模拟已完成并进入等待命令模式。" + "error": t('api.envNotRunning') }), 400 # 优化prompt,添加前缀避免Agent调用工具 @@ -2251,7 +2256,7 @@ def interview_agent(): except TimeoutError as e: return jsonify({ "success": False, - "error": f"等待Interview响应超时: {str(e)}" + "error": t('api.interviewTimeout', error=str(e)) }), 504 except Exception as e: @@ -2318,20 +2323,20 @@ def interview_agents_batch(): if not simulation_id: return jsonify({ "success": False, - "error": "请提供 simulation_id" + "error": t('api.requireSimulationId') }), 400 if not interviews or not isinstance(interviews, list): return jsonify({ "success": False, - "error": "请提供 interviews(采访列表)" + "error": t('api.requireInterviews') }), 400 # 验证platform参数 if platform and platform not in ("twitter", "reddit"): return 
jsonify({ "success": False, - "error": "platform 参数只能是 'twitter' 或 'reddit'" + "error": t('api.invalidInterviewPlatform') }), 400 # 验证每个采访项 @@ -2339,26 +2344,26 @@ def interview_agents_batch(): if 'agent_id' not in interview: return jsonify({ "success": False, - "error": f"采访列表第{i+1}项缺少 agent_id" + "error": t('api.interviewListMissingAgentId', index=i+1) }), 400 if 'prompt' not in interview: return jsonify({ "success": False, - "error": f"采访列表第{i+1}项缺少 prompt" + "error": t('api.interviewListMissingPrompt', index=i+1) }), 400 # 验证每项的platform(如果有) item_platform = interview.get('platform') if item_platform and item_platform not in ("twitter", "reddit"): return jsonify({ "success": False, - "error": f"采访列表第{i+1}项的platform只能是 'twitter' 或 'reddit'" + "error": t('api.interviewListInvalidPlatform', index=i+1) }), 400 # 检查环境状态 if not SimulationRunner.check_env_alive(simulation_id): return jsonify({ "success": False, - "error": "模拟环境未运行或已关闭。请确保模拟已完成并进入等待命令模式。" + "error": t('api.envNotRunning') }), 400 # 优化每个采访项的prompt,添加前缀避免Agent调用工具 @@ -2389,7 +2394,7 @@ def interview_agents_batch(): except TimeoutError as e: return jsonify({ "success": False, - "error": f"等待批量Interview响应超时: {str(e)}" + "error": t('api.batchInterviewTimeout', error=str(e)) }), 504 except Exception as e: @@ -2445,27 +2450,27 @@ def interview_all_agents(): if not simulation_id: return jsonify({ "success": False, - "error": "请提供 simulation_id" + "error": t('api.requireSimulationId') }), 400 if not prompt: return jsonify({ "success": False, - "error": "请提供 prompt(采访问题)" + "error": t('api.requirePrompt') }), 400 # 验证platform参数 if platform and platform not in ("twitter", "reddit"): return jsonify({ "success": False, - "error": "platform 参数只能是 'twitter' 或 'reddit'" + "error": t('api.invalidInterviewPlatform') }), 400 # 检查环境状态 if not SimulationRunner.check_env_alive(simulation_id): return jsonify({ "success": False, - "error": "模拟环境未运行或已关闭。请确保模拟已完成并进入等待命令模式。" + "error": t('api.envNotRunning') }), 400 # 
优化prompt,添加前缀避免Agent调用工具 @@ -2492,7 +2497,7 @@ def interview_all_agents(): except TimeoutError as e: return jsonify({ "success": False, - "error": f"等待全局Interview响应超时: {str(e)}" + "error": t('api.globalInterviewTimeout', error=str(e)) }), 504 except Exception as e: @@ -2549,7 +2554,7 @@ def get_interview_history(): if not simulation_id: return jsonify({ "success": False, - "error": "请提供 simulation_id" + "error": t('api.requireSimulationId') }), 400 history = SimulationRunner.get_interview_history( @@ -2608,7 +2613,7 @@ def get_env_status(): if not simulation_id: return jsonify({ "success": False, - "error": "请提供 simulation_id" + "error": t('api.requireSimulationId') }), 400 env_alive = SimulationRunner.check_env_alive(simulation_id) @@ -2617,9 +2622,9 @@ def get_env_status(): env_status = SimulationRunner.get_env_status_detail(simulation_id) if env_alive: - message = "环境正在运行,可以接收Interview命令" + message = t('api.envRunning') else: - message = "环境未运行或已关闭" + message = t('api.envNotRunningShort') return jsonify({ "success": True, @@ -2676,7 +2681,7 @@ def close_simulation_env(): if not simulation_id: return jsonify({ "success": False, - "error": "请提供 simulation_id" + "error": t('api.requireSimulationId') }), 400 result = SimulationRunner.close_simulation_env( diff --git a/backend/app/models/task.py b/backend/app/models/task.py index e15f35fbd..dfebed23b 100644 --- a/backend/app/models/task.py +++ b/backend/app/models/task.py @@ -10,6 +10,8 @@ from typing import Dict, Any, Optional from dataclasses import dataclass, field +from ..utils.locale import t + class TaskStatus(str, Enum): """任务状态枚举""" @@ -148,7 +150,7 @@ def complete_task(self, task_id: str, result: Dict): task_id, status=TaskStatus.COMPLETED, progress=100, - message="任务完成", + message=t('progress.taskComplete'), result=result ) @@ -157,7 +159,7 @@ def fail_task(self, task_id: str, error: str): self.update_task( task_id, status=TaskStatus.FAILED, - message="任务失败", + message=t('progress.taskFailed'), error=error 
) diff --git a/backend/app/services/graph_builder.py b/backend/app/services/graph_builder.py index 0e0444bf3..37c9969c7 100644 --- a/backend/app/services/graph_builder.py +++ b/backend/app/services/graph_builder.py @@ -17,6 +17,7 @@ from ..models.task import TaskManager, TaskStatus from ..utils.zep_paging import fetch_all_nodes, fetch_all_edges from .text_processor import TextProcessor +from ..utils.locale import t, get_locale, set_locale @dataclass @@ -83,10 +84,13 @@ def build_graph_async( } ) + # Capture locale before spawning background thread + current_locale = get_locale() + # 在后台线程中执行构建 thread = threading.Thread( target=self._build_graph_worker, - args=(task_id, text, ontology, graph_name, chunk_size, chunk_overlap, batch_size) + args=(task_id, text, ontology, graph_name, chunk_size, chunk_overlap, batch_size, current_locale) ) thread.daemon = True thread.start() @@ -101,15 +105,17 @@ def _build_graph_worker( graph_name: str, chunk_size: int, chunk_overlap: int, - batch_size: int + batch_size: int, + locale: str = 'zh' ): """图谱构建工作线程""" + set_locale(locale) try: self.task_manager.update_task( task_id, status=TaskStatus.PROCESSING, progress=5, - message="开始构建图谱..." + message=t('progress.startBuildingGraph') ) # 1. 创建图谱 @@ -117,7 +123,7 @@ def _build_graph_worker( self.task_manager.update_task( task_id, progress=10, - message=f"图谱已创建: {graph_id}" + message=t('progress.graphCreated', graphId=graph_id) ) # 2. 设置本体 @@ -125,7 +131,7 @@ def _build_graph_worker( self.task_manager.update_task( task_id, progress=15, - message="本体已设置" + message=t('progress.ontologySet') ) # 3. 文本分块 @@ -134,7 +140,7 @@ def _build_graph_worker( self.task_manager.update_task( task_id, progress=20, - message=f"文本已分割为 {total_chunks} 个块" + message=t('progress.textSplit', count=total_chunks) ) # 4. 分批发送数据 @@ -151,7 +157,7 @@ def _build_graph_worker( self.task_manager.update_task( task_id, progress=60, - message="等待Zep处理数据..." 
+ message=t('progress.waitingZepProcess') ) self._wait_for_episodes( @@ -167,7 +173,7 @@ def _build_graph_worker( self.task_manager.update_task( task_id, progress=90, - message="获取图谱信息..." + message=t('progress.fetchingGraphInfo') ) graph_info = self._get_graph_info(graph_id) @@ -304,7 +310,7 @@ def add_text_batches( if progress_callback: progress = (i + len(batch_chunks)) / total_chunks progress_callback( - f"发送第 {batch_num}/{total_batches} 批数据 ({len(batch_chunks)} 块)...", + t('progress.sendingBatch', current=batch_num, total=total_batches, chunks=len(batch_chunks)), progress ) @@ -333,7 +339,7 @@ def add_text_batches( except Exception as e: if progress_callback: - progress_callback(f"批次 {batch_num} 发送失败: {str(e)}", 0) + progress_callback(t('progress.batchFailed', batch=batch_num, error=str(e)), 0) raise return episode_uuids @@ -347,7 +353,7 @@ def _wait_for_episodes( """等待所有 episode 处理完成(通过查询每个 episode 的 processed 状态)""" if not episode_uuids: if progress_callback: - progress_callback("无需等待(没有 episode)", 1.0) + progress_callback(t('progress.noEpisodesWait'), 1.0) return start_time = time.time() @@ -356,13 +362,13 @@ def _wait_for_episodes( total_episodes = len(episode_uuids) if progress_callback: - progress_callback(f"开始等待 {total_episodes} 个文本块处理...", 0) + progress_callback(t('progress.waitingEpisodes', count=total_episodes), 0) while pending_episodes: if time.time() - start_time > timeout: if progress_callback: progress_callback( - f"部分文本块超时,已完成 {completed_count}/{total_episodes}", + t('progress.episodesTimeout', completed=completed_count, total=total_episodes), completed_count / total_episodes ) break @@ -384,7 +390,7 @@ def _wait_for_episodes( elapsed = int(time.time() - start_time) if progress_callback: progress_callback( - f"Zep处理中... 
{completed_count}/{total_episodes} 完成, {len(pending_episodes)} 待处理 ({elapsed}秒)", + t('progress.zepProcessing', completed=completed_count, total=total_episodes, pending=len(pending_episodes), elapsed=elapsed), completed_count / total_episodes if total_episodes > 0 else 0 ) @@ -392,7 +398,7 @@ def _wait_for_episodes( time.sleep(3) # 每3秒检查一次 if progress_callback: - progress_callback(f"处理完成: {completed_count}/{total_episodes}", 1.0) + progress_callback(t('progress.processingComplete', completed=completed_count, total=total_episodes), 1.0) def _get_graph_info(self, graph_id: str) -> GraphInfo: """获取图谱信息""" diff --git a/backend/app/services/oasis_profile_generator.py b/backend/app/services/oasis_profile_generator.py index 57836c539..7704a627e 100644 --- a/backend/app/services/oasis_profile_generator.py +++ b/backend/app/services/oasis_profile_generator.py @@ -20,6 +20,7 @@ from ..config import Config from ..utils.logger import get_logger +from ..utils.locale import get_language_instruction, get_locale, set_locale, t from .zep_entity_reader import EntityNode, ZepEntityReader logger = get_logger('mirofish.oasis_profile') @@ -313,7 +314,7 @@ def _search_zep_for_entity(self, entity: EntityNode) -> Dict[str, Any]: logger.debug(f"跳过Zep检索:未设置graph_id") return results - comprehensive_query = f"关于{entity_name}的所有信息、活动、事件、关系和背景" + comprehensive_query = t('progress.zepSearchQuery', name=entity_name) def search_edges(): """搜索边(事实/关系)- 带重试机制""" @@ -670,8 +671,8 @@ def fix_string_newlines(match): def _get_system_prompt(self, is_individual: bool) -> str: """获取系统提示词""" - base_prompt = "你是社交媒体用户画像生成专家。生成详细、真实的人设用于舆论模拟,最大程度还原已有现实情况。必须返回有效的JSON格式,所有字符串值不能包含未转义的换行符。使用中文。" - return base_prompt + base_prompt = "你是社交媒体用户画像生成专家。生成详细、真实的人设用于舆论模拟,最大程度还原已有现实情况。必须返回有效的JSON格式,所有字符串值不能包含未转义的换行符。" + return f"{base_prompt}\n\n{get_language_instruction()}" def _build_individual_persona_prompt( self, @@ -717,7 +718,7 @@ def _build_individual_persona_prompt( 重要: - 所有字段值必须是字符串或数字,不要使用换行符 - 
persona必须是一段连贯的文字描述 -- 使用中文(除了gender字段必须用英文male/female) +- {get_language_instruction()} (gender字段必须用英文male/female) - 内容要与实体信息保持一致 - age必须是有效的整数,gender必须是"male"或"female" """ @@ -766,7 +767,7 @@ def _build_group_persona_prompt( 重要: - 所有字段值必须是字符串或数字,不允许null值 - persona必须是一段连贯的文字描述,不要使用换行符 -- 使用中文(除了gender字段必须用英文"other") +- {get_language_instruction()} (gender字段必须用英文"other") - age必须是整数30,gender必须是字符串"other" - 机构账号发言要符合其身份定位""" @@ -915,8 +916,12 @@ def save_profiles_realtime(): except Exception as e: logger.warning(f"实时保存 profiles 失败: {e}") + # Capture locale before spawning thread pool workers + current_locale = get_locale() + def generate_single_profile(idx: int, entity: EntityNode) -> tuple: """生成单个profile的工作函数""" + set_locale(current_locale) entity_type = entity.get_entity_type() or "Entity" try: @@ -1017,7 +1022,7 @@ def _print_generated_profile(self, entity_name: str, entity_type: str, profile: output_lines = [ f"\n{separator}", - f"[已生成] {entity_name} ({entity_type})", + t('progress.profileGenerated', name=entity_name, type=entity_type), f"{separator}", f"用户名: {profile.user_name}", f"", diff --git a/backend/app/services/ontology_generator.py b/backend/app/services/ontology_generator.py index 2d3e39bd8..0601f3a31 100644 --- a/backend/app/services/ontology_generator.py +++ b/backend/app/services/ontology_generator.py @@ -6,6 +6,7 @@ import json from typing import Dict, Any, List, Optional from ..utils.llm_client import LLMClient +from ..utils.locale import get_language_instruction # 本体生成的系统提示词 @@ -66,7 +67,7 @@ "attributes": [] } ], - "analysis_summary": "对文本内容的简要分析说明(中文)" + "analysis_summary": "对文本内容的简要分析说明" } ``` @@ -188,8 +189,10 @@ def generate( additional_context ) + lang_instruction = get_language_instruction() + system_prompt = f"{ONTOLOGY_SYSTEM_PROMPT}\n\n{lang_instruction}\nIMPORTANT: Entity type names MUST be in English PascalCase (e.g., 'PersonEntity', 'MediaOrganization'). Relationship type names MUST be in English UPPER_SNAKE_CASE (e.g., 'WORKS_FOR'). 
Attribute names MUST be in English snake_case. Only description fields and analysis_summary should use the specified language above." messages = [ - {"role": "system", "content": ONTOLOGY_SYSTEM_PROMPT}, + {"role": "system", "content": system_prompt}, {"role": "user", "content": user_message} ] diff --git a/backend/app/services/report_agent.py b/backend/app/services/report_agent.py index 02ca5bdc2..cecd70b46 100644 --- a/backend/app/services/report_agent.py +++ b/backend/app/services/report_agent.py @@ -21,6 +21,7 @@ from ..config import Config from ..utils.llm_client import LLMClient from ..utils.logger import get_logger +from ..utils.locale import get_language_instruction, t from .zep_tools import ( ZepToolsService, SearchResult, @@ -105,7 +106,7 @@ def log_start(self, simulation_id: str, graph_id: str, simulation_requirement: s "simulation_id": simulation_id, "graph_id": graph_id, "simulation_requirement": simulation_requirement, - "message": "报告生成任务开始" + "message": t('report.taskStarted') } ) @@ -114,7 +115,7 @@ def log_planning_start(self): self.log( action="planning_start", stage="planning", - details={"message": "开始规划报告大纲"} + details={"message": t('report.planningStart')} ) def log_planning_context(self, context: Dict[str, Any]): @@ -123,7 +124,7 @@ def log_planning_context(self, context: Dict[str, Any]): action="planning_context", stage="planning", details={ - "message": "获取模拟上下文信息", + "message": t('report.fetchSimContext'), "context": context } ) @@ -134,7 +135,7 @@ def log_planning_complete(self, outline_dict: Dict[str, Any]): action="planning_complete", stage="planning", details={ - "message": "大纲规划完成", + "message": t('report.planningComplete'), "outline": outline_dict } ) @@ -146,7 +147,7 @@ def log_section_start(self, section_title: str, section_index: int): stage="generating", section_title=section_title, section_index=section_index, - details={"message": f"开始生成章节: {section_title}"} + details={"message": t('report.sectionStart', title=section_title)} 
) def log_react_thought(self, section_title: str, section_index: int, iteration: int, thought: str): @@ -159,7 +160,7 @@ def log_react_thought(self, section_title: str, section_index: int, iteration: i details={ "iteration": iteration, "thought": thought, - "message": f"ReACT 第{iteration}轮思考" + "message": t('report.reactThought', iteration=iteration) } ) @@ -181,7 +182,7 @@ def log_tool_call( "iteration": iteration, "tool_name": tool_name, "parameters": parameters, - "message": f"调用工具: {tool_name}" + "message": t('report.toolCall', toolName=tool_name) } ) @@ -204,7 +205,7 @@ def log_tool_result( "tool_name": tool_name, "result": result, # 完整结果,不截断 "result_length": len(result), - "message": f"工具 {tool_name} 返回结果" + "message": t('report.toolResult', toolName=tool_name) } ) @@ -229,7 +230,7 @@ def log_llm_response( "response_length": len(response), "has_tool_calls": has_tool_calls, "has_final_answer": has_final_answer, - "message": f"LLM 响应 (工具调用: {has_tool_calls}, 最终答案: {has_final_answer})" + "message": t('report.llmResponse', hasToolCalls=has_tool_calls, hasFinalAnswer=has_final_answer) } ) @@ -250,7 +251,7 @@ def log_section_content( "content": content, # 完整内容,不截断 "content_length": len(content), "tool_calls_count": tool_calls_count, - "message": f"章节 {section_title} 内容生成完成" + "message": t('report.sectionContentDone', title=section_title) } ) @@ -273,7 +274,7 @@ def log_section_full_complete( details={ "content": full_content, "content_length": len(full_content), - "message": f"章节 {section_title} 生成完成" + "message": t('report.sectionComplete', title=section_title) } ) @@ -285,7 +286,7 @@ def log_report_complete(self, total_sections: int, total_time_seconds: float): details={ "total_sections": total_sections, "total_time_seconds": round(total_time_seconds, 2), - "message": "报告生成完成" + "message": t('report.reportComplete') } ) @@ -298,7 +299,7 @@ def log_error(self, error_message: str, stage: str, section_title: str = None): section_index=None, details={ "error": 
error_message, - "message": f"发生错误: {error_message}" + "message": t('report.errorOccurred', error=error_message) } ) @@ -652,9 +653,9 @@ def to_dict(self) -> Dict[str, Any]: - 这些引用是模拟预测的核心证据 3. 【语言一致性 - 引用内容必须翻译为报告语言】 - - 工具返回的内容可能包含英文或中英文混杂的表述 - - 如果模拟需求和材料原文是中文的,报告必须全部使用中文撰写 - - 当你引用工具返回的英文或中英混杂内容时,必须将其翻译为流畅的中文后再写入报告 + - 工具返回的内容可能包含与报告语言不同的表述 + - 报告必须全部使用与用户指定语言一致的语言撰写 + - 当你引用工具返回的其他语言内容时,必须将其翻译为报告语言后再写入 - 翻译时保持原意不变,确保表述自然通顺 - 这一规则同时适用于正文和引用块(> 格式)中的内容 @@ -913,7 +914,7 @@ def __init__( # 控制台日志记录器(在 generate_report 中初始化) self.console_logger: Optional[ReportConsoleLogger] = None - logger.info(f"ReportAgent 初始化完成: graph_id={graph_id}, simulation_id={simulation_id}") + logger.info(t('report.agentInitDone', graphId=graph_id, simulationId=simulation_id)) def _define_tools(self) -> Dict[str, Dict[str, Any]]: """定义可用工具""" @@ -964,7 +965,7 @@ def _execute_tool(self, tool_name: str, parameters: Dict[str, Any], report_conte Returns: 工具执行结果(文本格式) """ - logger.info(f"执行工具: {tool_name}, 参数: {parameters}") + logger.info(t('report.executingTool', toolName=tool_name, params=parameters)) try: if tool_name == "insight_forge": @@ -1023,7 +1024,7 @@ def _execute_tool(self, tool_name: str, parameters: Dict[str, Any], report_conte elif tool_name == "search_graph": # 重定向到 quick_search - logger.info("search_graph 已重定向到 quick_search") + logger.info(t('report.redirectToQuickSearch')) return self._execute_tool("quick_search", parameters, report_context) elif tool_name == "get_graph_statistics": @@ -1040,7 +1041,7 @@ def _execute_tool(self, tool_name: str, parameters: Dict[str, Any], report_conte elif tool_name == "get_simulation_context": # 重定向到 insight_forge,因为它更强大 - logger.info("get_simulation_context 已重定向到 insight_forge") + logger.info(t('report.redirectToInsightForge')) query = parameters.get("query", self.simulation_requirement) return self._execute_tool("insight_forge", {"query": query}, report_context) @@ -1057,7 +1058,7 @@ def _execute_tool(self, tool_name: str, parameters: 
Dict[str, Any], report_conte return f"未知工具: {tool_name}。请使用以下工具之一: insight_forge, panorama_search, quick_search" except Exception as e: - logger.error(f"工具执行失败: {tool_name}, 错误: {str(e)}") + logger.error(t('report.toolExecFailed', toolName=tool_name, error=str(e))) return f"工具执行失败: {str(e)}" # 合法的工具名称集合,用于裸 JSON 兜底解析时校验 @@ -1148,10 +1149,10 @@ def plan_outline( Returns: ReportOutline: 报告大纲 """ - logger.info("开始规划报告大纲...") + logger.info(t('report.startPlanningOutline')) if progress_callback: - progress_callback("planning", 0, "正在分析模拟需求...") + progress_callback("planning", 0, t('progress.analyzingRequirements')) # 首先获取模拟上下文 context = self.zep_tools.get_simulation_context( @@ -1160,9 +1161,9 @@ def plan_outline( ) if progress_callback: - progress_callback("planning", 30, "正在生成报告大纲...") + progress_callback("planning", 30, t('progress.generatingOutline')) - system_prompt = PLAN_SYSTEM_PROMPT + system_prompt = f"{PLAN_SYSTEM_PROMPT}\n\n{get_language_instruction()}" user_prompt = PLAN_USER_PROMPT_TEMPLATE.format( simulation_requirement=self.simulation_requirement, total_nodes=context.get('graph_statistics', {}).get('total_nodes', 0), @@ -1182,7 +1183,7 @@ def plan_outline( ) if progress_callback: - progress_callback("planning", 80, "正在解析大纲结构...") + progress_callback("planning", 80, t('progress.parsingOutline')) # 解析大纲 sections = [] @@ -1199,13 +1200,13 @@ def plan_outline( ) if progress_callback: - progress_callback("planning", 100, "大纲规划完成") + progress_callback("planning", 100, t('progress.outlinePlanComplete')) - logger.info(f"大纲规划完成: {len(sections)} 个章节") + logger.info(t('report.outlinePlanDone', count=len(sections))) return outline except Exception as e: - logger.error(f"大纲规划失败: {str(e)}") + logger.error(t('report.outlinePlanFailed', error=str(e))) # 返回默认大纲(3个章节,作为fallback) return ReportOutline( title="未来预测报告", @@ -1245,7 +1246,7 @@ def _generate_section_react( Returns: 章节内容(Markdown格式) """ - logger.info(f"ReACT生成章节: {section.title}") + 
logger.info(t('report.reactGenerateSection', title=section.title)) # 记录章节开始日志 if self.report_logger: @@ -1258,6 +1259,7 @@ def _generate_section_react( section_title=section.title, tools_description=self._get_tools_description(), ) + system_prompt = f"{system_prompt}\n\n{get_language_instruction()}" # 构建用户prompt - 每个已完成章节各传入最大4000字 if previous_sections: @@ -1296,7 +1298,7 @@ def _generate_section_react( progress_callback( "generating", int((iteration / max_iterations) * 100), - f"深度检索与撰写中 ({tool_calls_count}/{self.MAX_TOOL_CALLS_PER_SECTION})" + t('progress.deepSearchAndWrite', current=tool_calls_count, max=self.MAX_TOOL_CALLS_PER_SECTION) ) # 调用LLM @@ -1308,7 +1310,7 @@ def _generate_section_react( # 检查 LLM 返回是否为 None(API 异常或内容为空) if response is None: - logger.warning(f"章节 {section.title} 第 {iteration + 1} 次迭代: LLM 返回 None") + logger.warning(t('report.sectionIterNone', title=section.title, iteration=iteration + 1)) # 如果还有迭代次数,添加消息并重试 if iteration < max_iterations - 1: messages.append({"role": "assistant", "content": "(响应为空)"}) @@ -1328,8 +1330,7 @@ def _generate_section_react( if has_tool_calls and has_final_answer: conflict_retries += 1 logger.warning( - f"章节 {section.title} 第 {iteration+1} 轮: " - f"LLM 同时输出工具调用和 Final Answer(第 {conflict_retries} 次冲突)" + t('report.sectionConflict', title=section.title, iteration=iteration+1, conflictCount=conflict_retries) ) if conflict_retries <= 2: @@ -1349,8 +1350,7 @@ def _generate_section_react( else: # 第三次:降级处理,截断到第一个工具调用,强制执行 logger.warning( - f"章节 {section.title}: 连续 {conflict_retries} 次冲突," - "降级为截断执行第一个工具调用" + t('report.sectionConflictDowngrade', title=section.title, conflictCount=conflict_retries) ) first_tool_end = response.find('') if first_tool_end != -1: @@ -1390,7 +1390,7 @@ def _generate_section_react( # 正常结束 final_answer = response.split("Final Answer:")[-1].strip() - logger.info(f"章节 {section.title} 生成完成(工具调用: {tool_calls_count}次)") + logger.info(t('report.sectionGenDone', title=section.title, 
count=tool_calls_count)) if self.report_logger: self.report_logger.log_section_content( @@ -1418,7 +1418,7 @@ def _generate_section_react( # 只执行第一个工具调用 call = tool_calls[0] if len(tool_calls) > 1: - logger.info(f"LLM 尝试调用 {len(tool_calls)} 个工具,只执行第一个: {call['name']}") + logger.info(t('report.multiToolOnlyFirst', total=len(tool_calls), toolName=call['name'])) if self.report_logger: self.report_logger.log_tool_call( @@ -1487,7 +1487,7 @@ def _generate_section_react( # 工具调用已足够,LLM 输出了内容但没带 "Final Answer:" 前缀 # 直接将这段内容作为最终答案,不再空转 - logger.info(f"章节 {section.title} 未检测到 'Final Answer:' 前缀,直接采纳LLM输出作为最终内容(工具调用: {tool_calls_count}次)") + logger.info(t('report.sectionNoPrefix', title=section.title, count=tool_calls_count)) final_answer = response.strip() if self.report_logger: @@ -1500,7 +1500,7 @@ def _generate_section_react( return final_answer # 达到最大迭代次数,强制生成内容 - logger.warning(f"章节 {section.title} 达到最大迭代次数,强制生成") + logger.warning(t('report.sectionMaxIter', title=section.title)) messages.append({"role": "user", "content": REACT_FORCE_FINAL_MSG}) response = self.llm.chat( @@ -1511,8 +1511,8 @@ def _generate_section_react( # 检查强制收尾时 LLM 返回是否为 None if response is None: - logger.error(f"章节 {section.title} 强制收尾时 LLM 返回 None,使用默认错误提示") - final_answer = f"(本章节生成失败:LLM 返回空响应,请稍后重试)" + logger.error(t('report.sectionForceFailed', title=section.title)) + final_answer = t('report.sectionGenFailedContent') elif "Final Answer:" in response: final_answer = response.split("Final Answer:")[-1].strip() else: @@ -1590,7 +1590,7 @@ def generate_report( self.console_logger = ReportConsoleLogger(report_id) ReportManager.update_progress( - report_id, "pending", 0, "初始化报告...", + report_id, "pending", 0, t('progress.initReport'), completed_sections=[] ) ReportManager.save_report(report) @@ -1598,7 +1598,7 @@ def generate_report( # 阶段1: 规划大纲 report.status = ReportStatus.PLANNING ReportManager.update_progress( - report_id, "planning", 5, "开始规划报告大纲...", + report_id, "planning", 5, 
t('progress.startPlanningOutline'), completed_sections=[] ) @@ -1606,7 +1606,7 @@ def generate_report( self.report_logger.log_planning_start() if progress_callback: - progress_callback("planning", 0, "开始规划报告大纲...") + progress_callback("planning", 0, t('progress.startPlanningOutline')) outline = self.plan_outline( progress_callback=lambda stage, prog, msg: @@ -1620,12 +1620,12 @@ def generate_report( # 保存大纲到文件 ReportManager.save_outline(report_id, outline) ReportManager.update_progress( - report_id, "planning", 15, f"大纲规划完成,共{len(outline.sections)}个章节", + report_id, "planning", 15, t('progress.outlineDone', count=len(outline.sections)), completed_sections=[] ) ReportManager.save_report(report) - logger.info(f"大纲已保存到文件: {report_id}/outline.json") + logger.info(t('report.outlineSavedToFile', reportId=report_id)) # 阶段2: 逐章节生成(分章节保存) report.status = ReportStatus.GENERATING @@ -1640,16 +1640,16 @@ def generate_report( # 更新进度 ReportManager.update_progress( report_id, "generating", base_progress, - f"正在生成章节: {section.title} ({section_num}/{total_sections})", + t('progress.generatingSection', title=section.title, current=section_num, total=total_sections), current_section=section.title, completed_sections=completed_section_titles ) - + if progress_callback: progress_callback( - "generating", - base_progress, - f"正在生成章节: {section.title} ({section_num}/{total_sections})" + "generating", + base_progress, + t('progress.generatingSection', title=section.title, current=section_num, total=total_sections) ) # 生成主章节内容 @@ -1683,23 +1683,23 @@ def generate_report( full_content=full_section_content.strip() ) - logger.info(f"章节已保存: {report_id}/section_{section_num:02d}.md") + logger.info(t('report.sectionSaved', reportId=report_id, sectionNum=f"{section_num:02d}")) # 更新进度 ReportManager.update_progress( report_id, "generating", base_progress + int(70 / total_sections), - f"章节 {section.title} 已完成", + t('progress.sectionDone', title=section.title), current_section=None, 
completed_sections=completed_section_titles ) # 阶段3: 组装完整报告 if progress_callback: - progress_callback("generating", 95, "正在组装完整报告...") + progress_callback("generating", 95, t('progress.assemblingReport')) ReportManager.update_progress( - report_id, "generating", 95, "正在组装完整报告...", + report_id, "generating", 95, t('progress.assemblingReport'), completed_sections=completed_section_titles ) @@ -1721,14 +1721,14 @@ def generate_report( # 保存最终报告 ReportManager.save_report(report) ReportManager.update_progress( - report_id, "completed", 100, "报告生成完成", + report_id, "completed", 100, t('progress.reportComplete'), completed_sections=completed_section_titles ) if progress_callback: - progress_callback("completed", 100, "报告生成完成") + progress_callback("completed", 100, t('progress.reportComplete')) - logger.info(f"报告生成完成: {report_id}") + logger.info(t('report.reportGenDone', reportId=report_id)) # 关闭控制台日志记录器 if self.console_logger: @@ -1738,7 +1738,7 @@ def generate_report( return report except Exception as e: - logger.error(f"报告生成失败: {str(e)}") + logger.error(t('report.reportGenFailed', error=str(e))) report.status = ReportStatus.FAILED report.error = str(e) @@ -1750,7 +1750,7 @@ def generate_report( try: ReportManager.save_report(report) ReportManager.update_progress( - report_id, "failed", -1, f"报告生成失败: {str(e)}", + report_id, "failed", -1, t('progress.reportFailed', error=str(e)), completed_sections=completed_section_titles ) except Exception: @@ -1784,7 +1784,7 @@ def chat( "sources": [信息来源] } """ - logger.info(f"Report Agent对话: {message[:50]}...") + logger.info(t('report.agentChat', message=message[:50])) chat_history = chat_history or [] @@ -1798,13 +1798,14 @@ def chat( if len(report.markdown_content) > 15000: report_content += "\n\n... [报告内容已截断] ..." 
except Exception as e: - logger.warning(f"获取报告内容失败: {e}") + logger.warning(t('report.fetchReportFailed', error=e)) system_prompt = CHAT_SYSTEM_PROMPT_TEMPLATE.format( simulation_requirement=self.simulation_requirement, report_content=report_content if report_content else "(暂无报告)", tools_description=self._get_tools_description(), ) + system_prompt = f"{system_prompt}\n\n{get_language_instruction()}" # 构建消息 messages = [{"role": "system", "content": system_prompt}] @@ -2088,7 +2089,7 @@ def save_outline(cls, report_id: str, outline: ReportOutline) -> None: with open(cls._get_outline_path(report_id), 'w', encoding='utf-8') as f: json.dump(outline.to_dict(), f, ensure_ascii=False, indent=2) - logger.info(f"大纲已保存: {report_id}") + logger.info(t('report.outlineSaved', reportId=report_id)) @classmethod def save_section( @@ -2124,7 +2125,7 @@ def save_section( with open(file_path, 'w', encoding='utf-8') as f: f.write(md_content) - logger.info(f"章节已保存: {report_id}/{file_suffix}") + logger.info(t('report.sectionFileSaved', reportId=report_id, fileSuffix=file_suffix)) return file_path @classmethod @@ -2293,7 +2294,7 @@ def assemble_full_report(cls, report_id: str, outline: ReportOutline) -> str: with open(full_path, 'w', encoding='utf-8') as f: f.write(md_content) - logger.info(f"完整报告已组装: {report_id}") + logger.info(t('report.fullReportAssembled', reportId=report_id)) return md_content @classmethod @@ -2440,7 +2441,7 @@ def save_report(cls, report: Report) -> None: with open(cls._get_report_markdown_path(report.report_id), 'w', encoding='utf-8') as f: f.write(report.markdown_content) - logger.info(f"报告已保存: {report.report_id}") + logger.info(t('report.reportSaved', reportId=report.report_id)) @classmethod def get_report(cls, report_id: str) -> Optional[Report]: @@ -2553,7 +2554,7 @@ def delete_report(cls, report_id: str) -> bool: # 新格式:删除整个文件夹 if os.path.exists(folder_path) and os.path.isdir(folder_path): shutil.rmtree(folder_path) - logger.info(f"报告文件夹已删除: {report_id}") + 
logger.info(t('report.reportFolderDeleted', reportId=report_id)) return True # 兼容旧格式:删除单独的文件 diff --git a/backend/app/services/simulation_config_generator.py b/backend/app/services/simulation_config_generator.py index cc362508b..cb77f6b6c 100644 --- a/backend/app/services/simulation_config_generator.py +++ b/backend/app/services/simulation_config_generator.py @@ -20,6 +20,7 @@ from ..config import Config from ..utils.logger import get_logger +from ..utils.locale import get_language_instruction, t from .zep_entity_reader import EntityNode, ZepEntityReader logger = get_logger('mirofish.simulation_config') @@ -292,17 +293,17 @@ def report_progress(step: int, message: str): reasoning_parts = [] # ========== 步骤1: 生成时间配置 ========== - report_progress(1, "生成时间配置...") + report_progress(1, t('progress.generatingTimeConfig')) num_entities = len(entities) time_config_result = self._generate_time_config(context, num_entities) time_config = self._parse_time_config(time_config_result, num_entities) - reasoning_parts.append(f"时间配置: {time_config_result.get('reasoning', '成功')}") + reasoning_parts.append(f"{t('progress.timeConfigLabel')}: {time_config_result.get('reasoning', t('common.success'))}") # ========== 步骤2: 生成事件配置 ========== - report_progress(2, "生成事件配置和热点话题...") + report_progress(2, t('progress.generatingEventConfig')) event_config_result = self._generate_event_config(context, simulation_requirement, entities) event_config = self._parse_event_config(event_config_result) - reasoning_parts.append(f"事件配置: {event_config_result.get('reasoning', '成功')}") + reasoning_parts.append(f"{t('progress.eventConfigLabel')}: {event_config_result.get('reasoning', t('common.success'))}") # ========== 步骤3-N: 分批生成Agent配置 ========== all_agent_configs = [] @@ -313,7 +314,7 @@ def report_progress(step: int, message: str): report_progress( 3 + batch_idx, - f"生成Agent配置 ({start_idx + 1}-{end_idx}/{len(entities)})..." 
+ t('progress.generatingAgentConfig', start=start_idx + 1, end=end_idx, total=len(entities)) ) batch_configs = self._generate_agent_configs_batch( @@ -324,16 +325,16 @@ def report_progress(step: int, message: str): ) all_agent_configs.extend(batch_configs) - reasoning_parts.append(f"Agent配置: 成功生成 {len(all_agent_configs)} 个") + reasoning_parts.append(t('progress.agentConfigResult', count=len(all_agent_configs))) # ========== 为初始帖子分配发布者 Agent ========== logger.info("为初始帖子分配合适的发布者 Agent...") event_config = self._assign_initial_post_agents(event_config, all_agent_configs) assigned_count = len([p for p in event_config.initial_posts if p.get("poster_agent_id") is not None]) - reasoning_parts.append(f"初始帖子分配: {assigned_count} 个帖子已分配发布者") + reasoning_parts.append(t('progress.postAssignResult', count=assigned_count)) # ========== 最后一步: 生成平台配置 ========== - report_progress(total_steps, "生成平台配置...") + report_progress(total_steps, t('progress.generatingPlatformConfig')) twitter_config = None reddit_config = None @@ -547,7 +548,7 @@ def _generate_time_config(self, context: str, num_entities: int) -> Dict[str, An 请生成时间配置JSON。 ### 基本原则(仅供参考,需根据具体事件和参与群体灵活调整): -- 用户群体为中国人,需符合北京时间作息习惯 +- 请根据模拟场景推断目标用户群体所在时区和作息习惯,以下为东八区(UTC+8)的参考示例 - 凌晨0-5点几乎无人活动(活跃度系数0.05) - 早上6-8点逐渐活跃(活跃度系数0.4) - 工作时间9-18点中等活跃(活跃度系数0.7) @@ -584,8 +585,9 @@ def _generate_time_config(self, context: str, num_entities: int) -> Dict[str, An - work_hours (int数组): 工作时段 - reasoning (string): 简要说明为什么这样配置""" - system_prompt = "你是社交媒体模拟专家。返回纯JSON格式,时间配置需符合中国人作息习惯。" - + system_prompt = "你是社交媒体模拟专家。返回纯JSON格式,时间配置需符合模拟场景中目标用户群体的作息习惯。" + system_prompt = f"{system_prompt}\n\n{get_language_instruction()}" + try: return self._call_llm_with_retry(prompt, system_prompt) except Exception as e: @@ -701,7 +703,8 @@ def _generate_event_config( }}""" system_prompt = "你是舆论分析专家。返回纯JSON格式。注意 poster_type 必须精确匹配可用实体类型。" - + system_prompt = f"{system_prompt}\n\n{get_language_instruction()}\nIMPORTANT: The 'poster_type' field value MUST be in 
English PascalCase exactly matching the available entity types. Only 'content', 'narrative_direction', 'hot_topics' and 'reasoning' fields should use the specified language." + try: return self._call_llm_with_retry(prompt, system_prompt) except Exception as e: @@ -838,7 +841,7 @@ def _generate_agent_configs_batch( ## 任务 为每个实体生成活动配置,注意: -- **时间符合中国人作息**:凌晨0-5点几乎不活动,晚间19-22点最活跃 +- **时间符合目标用户群体作息**:以下为参考(东八区),请根据模拟场景调整 - **官方机构**(University/GovernmentAgency):活跃度低(0.1-0.3),工作时间(9-17)活动,响应慢(60-240分钟),影响力高(2.5-3.0) - **媒体**(MediaOutlet):活跃度中(0.4-0.6),全天活动(8-23),响应快(5-30分钟),影响力高(2.0-2.5) - **个人**(Student/Person/Alumni):活跃度高(0.6-0.9),主要晚间活动(18-23),响应快(1-15分钟),影响力低(0.8-1.2) @@ -863,8 +866,9 @@ def _generate_agent_configs_batch( ] }}""" - system_prompt = "你是社交媒体行为分析专家。返回纯JSON,配置需符合中国人作息习惯。" - + system_prompt = "你是社交媒体行为分析专家。返回纯JSON,配置需符合模拟场景中目标用户群体的作息习惯。" + system_prompt = f"{system_prompt}\n\n{get_language_instruction()}\nIMPORTANT: The 'stance' field value MUST be one of the English strings: 'supportive', 'opposing', 'neutral', 'observer'. All JSON field names and numeric values must remain unchanged. Only natural language text fields should use the specified language." 
+ try: result = self._call_llm_with_retry(prompt, system_prompt) llm_configs = {cfg["agent_id"]: cfg for cfg in result.get("agent_configs", [])} diff --git a/backend/app/services/simulation_manager.py b/backend/app/services/simulation_manager.py index 96c496fd4..0d161a909 100644 --- a/backend/app/services/simulation_manager.py +++ b/backend/app/services/simulation_manager.py @@ -17,6 +17,7 @@ from .zep_entity_reader import ZepEntityReader, FilteredEntities from .oasis_profile_generator import OasisProfileGenerator, OasisAgentProfile from .simulation_config_generator import SimulationConfigGenerator, SimulationParameters +from ..utils.locale import t logger = get_logger('mirofish.simulation') @@ -270,12 +271,12 @@ def prepare_simulation( # ========== 阶段1: 读取并过滤实体 ========== if progress_callback: - progress_callback("reading", 0, "正在连接Zep图谱...") + progress_callback("reading", 0, t('progress.connectingZepGraph')) reader = ZepEntityReader() if progress_callback: - progress_callback("reading", 30, "正在读取节点数据...") + progress_callback("reading", 30, t('progress.readingNodeData')) filtered = reader.filter_defined_entities( graph_id=state.graph_id, @@ -288,8 +289,8 @@ def prepare_simulation( if progress_callback: progress_callback( - "reading", 100, - f"完成,共 {filtered.filtered_count} 个实体", + "reading", 100, + t('progress.readingComplete', count=filtered.filtered_count), current=filtered.filtered_count, total=filtered.filtered_count ) @@ -305,8 +306,8 @@ def prepare_simulation( if progress_callback: progress_callback( - "generating_profiles", 0, - "开始生成...", + "generating_profiles", 0, + t('progress.startGenerating'), current=0, total=total_entities ) @@ -351,8 +352,8 @@ def profile_progress(current, total, msg): # Reddit 已经在生成过程中实时保存了,这里再保存一次确保完整性 if progress_callback: progress_callback( - "generating_profiles", 95, - "保存Profile文件...", + "generating_profiles", 95, + t('progress.savingProfiles'), current=total_entities, total=total_entities ) @@ -374,8 +375,8 @@ def 
profile_progress(current, total, msg): if progress_callback: progress_callback( - "generating_profiles", 100, - f"完成,共 {len(profiles)} 个Profile", + "generating_profiles", 100, + t('progress.profilesComplete', count=len(profiles)), current=len(profiles), total=len(profiles) ) @@ -383,8 +384,8 @@ def profile_progress(current, total, msg): # ========== 阶段3: LLM智能生成模拟配置 ========== if progress_callback: progress_callback( - "generating_config", 0, - "正在分析模拟需求...", + "generating_config", 0, + t('progress.analyzingRequirements'), current=0, total=3 ) @@ -393,8 +394,8 @@ def profile_progress(current, total, msg): if progress_callback: progress_callback( - "generating_config", 30, - "正在调用LLM生成配置...", + "generating_config", 30, + t('progress.callingLLMConfig'), current=1, total=3 ) @@ -412,8 +413,8 @@ def profile_progress(current, total, msg): if progress_callback: progress_callback( - "generating_config", 70, - "正在保存配置文件...", + "generating_config", 70, + t('progress.savingConfigFiles'), current=2, total=3 ) @@ -428,8 +429,8 @@ def profile_progress(current, total, msg): if progress_callback: progress_callback( - "generating_config", 100, - "配置生成完成", + "generating_config", 100, + t('progress.configComplete'), current=3, total=3 ) diff --git a/backend/app/services/simulation_runner.py b/backend/app/services/simulation_runner.py index 8c35380d1..e86021f80 100644 --- a/backend/app/services/simulation_runner.py +++ b/backend/app/services/simulation_runner.py @@ -20,6 +20,7 @@ from ..config import Config from ..utils.logger import get_logger +from ..utils.locale import get_locale, set_locale from .zep_graph_memory_updater import ZepGraphMemoryManager from .simulation_ipc import SimulationIPCClient, CommandType, IPCResponse @@ -455,10 +456,13 @@ def start_simulation( cls._processes[simulation_id] = process cls._save_run_state(state) + # Capture locale before spawning monitor thread + current_locale = get_locale() + # 启动监控线程 monitor_thread = threading.Thread( 
target=cls._monitor_simulation, - args=(simulation_id,), + args=(simulation_id, current_locale), daemon=True ) monitor_thread.start() @@ -475,8 +479,9 @@ def start_simulation( return state @classmethod - def _monitor_simulation(cls, simulation_id: str): + def _monitor_simulation(cls, simulation_id: str, locale: str = 'zh'): """监控模拟进程,解析动作日志""" + set_locale(locale) sim_dir = os.path.join(cls.RUN_STATE_DIR, simulation_id) # 新的日志结构:分平台的动作日志 diff --git a/backend/app/services/zep_graph_memory_updater.py b/backend/app/services/zep_graph_memory_updater.py index a8f3cecd9..e034fee2b 100644 --- a/backend/app/services/zep_graph_memory_updater.py +++ b/backend/app/services/zep_graph_memory_updater.py @@ -16,6 +16,7 @@ from ..config import Config from ..utils.logger import get_logger +from ..utils.locale import get_locale, set_locale logger = get_logger('mirofish.zep_graph_memory_updater') @@ -275,10 +276,14 @@ def start(self): """启动后台工作线程""" if self._running: return - + + # Capture locale before spawning background thread + current_locale = get_locale() + self._running = True self._worker_thread = threading.Thread( target=self._worker_loop, + args=(current_locale,), daemon=True, name=f"ZepMemoryUpdater-{self.graph_id[:8]}" ) @@ -356,8 +361,9 @@ def add_activity_from_dict(self, data: Dict[str, Any], platform: str): self.add_activity(activity) - def _worker_loop(self): + def _worker_loop(self, locale: str = 'zh'): """后台工作循环 - 按平台批量发送活动到Zep""" + set_locale(locale) while self._running or not self._activity_queue.empty(): try: # 尝试从队列获取活动(超时1秒) diff --git a/backend/app/services/zep_tools.py b/backend/app/services/zep_tools.py index 384cf540f..3bc8a57ab 100644 --- a/backend/app/services/zep_tools.py +++ b/backend/app/services/zep_tools.py @@ -18,6 +18,7 @@ from ..config import Config from ..utils.logger import get_logger from ..utils.llm_client import LLMClient +from ..utils.locale import get_locale, t from ..utils.zep_paging import fetch_all_nodes, fetch_all_edges logger = 
get_logger('mirofish.zep_tools') @@ -429,7 +430,7 @@ def __init__(self, api_key: Optional[str] = None, llm_client: Optional[LLMClient self.client = Zep(api_key=self.api_key) # LLM客户端用于InsightForge生成子问题 self._llm_client = llm_client - logger.info("ZepToolsService 初始化完成") + logger.info(t("console.zepToolsInitialized")) @property def llm(self) -> LLMClient: @@ -451,13 +452,12 @@ def _call_with_retry(self, func, operation_name: str, max_retries: int = None): last_exception = e if attempt < max_retries - 1: logger.warning( - f"Zep {operation_name} 第 {attempt + 1} 次尝试失败: {str(e)[:100]}, " - f"{delay:.1f}秒后重试..." + t("console.zepRetryAttempt", operation=operation_name, attempt=attempt + 1, error=str(e)[:100], delay=f"{delay:.1f}") ) time.sleep(delay) delay *= 2 else: - logger.error(f"Zep {operation_name} 在 {max_retries} 次尝试后仍失败: {str(e)}") + logger.error(t("console.zepAllRetriesFailed", operation=operation_name, retries=max_retries, error=str(e))) raise last_exception @@ -483,7 +483,7 @@ def search_graph( Returns: SearchResult: 搜索结果 """ - logger.info(f"图谱搜索: graph_id={graph_id}, query={query[:50]}...") + logger.info(t("console.graphSearch", graphId=graph_id, query=query[:50])) # 尝试使用Zep Cloud Search API try: @@ -495,7 +495,7 @@ def search_graph( scope=scope, reranker="cross_encoder" ), - operation_name=f"图谱搜索(graph={graph_id})" + operation_name=t("console.graphSearchOp", graphId=graph_id) ) facts = [] @@ -528,7 +528,7 @@ def search_graph( if hasattr(node, 'summary') and node.summary: facts.append(f"[{node.name}]: {node.summary}") - logger.info(f"搜索完成: 找到 {len(facts)} 条相关事实") + logger.info(t("console.searchComplete", count=len(facts))) return SearchResult( facts=facts, @@ -539,7 +539,7 @@ def search_graph( ) except Exception as e: - logger.warning(f"Zep Search API失败,降级为本地搜索: {str(e)}") + logger.warning(t("console.zepSearchApiFallback", error=str(e))) # 降级:使用本地关键词匹配搜索 return self._local_search(graph_id, query, limit, scope) @@ -564,7 +564,7 @@ def _local_search( Returns: 
SearchResult: 搜索结果 """ - logger.info(f"使用本地搜索: query={query[:30]}...") + logger.info(t("console.usingLocalSearch", query=query[:30])) facts = [] edges_result = [] @@ -634,10 +634,10 @@ def match_score(text: str) -> int: if node.summary: facts.append(f"[{node.name}]: {node.summary}") - logger.info(f"本地搜索完成: 找到 {len(facts)} 条相关事实") + logger.info(t("console.localSearchComplete", count=len(facts))) except Exception as e: - logger.error(f"本地搜索失败: {str(e)}") + logger.error(t("console.localSearchFailed", error=str(e))) return SearchResult( facts=facts, @@ -657,7 +657,7 @@ def get_all_nodes(self, graph_id: str) -> List[NodeInfo]: Returns: 节点列表 """ - logger.info(f"获取图谱 {graph_id} 的所有节点...") + logger.info(t("console.fetchingAllNodes", graphId=graph_id)) nodes = fetch_all_nodes(self.client, graph_id) @@ -672,7 +672,7 @@ def get_all_nodes(self, graph_id: str) -> List[NodeInfo]: attributes=node.attributes or {} )) - logger.info(f"获取到 {len(result)} 个节点") + logger.info(t("console.fetchedNodes", count=len(result))) return result def get_all_edges(self, graph_id: str, include_temporal: bool = True) -> List[EdgeInfo]: @@ -686,7 +686,7 @@ def get_all_edges(self, graph_id: str, include_temporal: bool = True) -> List[Ed Returns: 边列表(包含created_at, valid_at, invalid_at, expired_at) """ - logger.info(f"获取图谱 {graph_id} 的所有边...") + logger.info(t("console.fetchingAllEdges", graphId=graph_id)) edges = fetch_all_edges(self.client, graph_id) @@ -710,7 +710,7 @@ def get_all_edges(self, graph_id: str, include_temporal: bool = True) -> List[Ed result.append(edge_info) - logger.info(f"获取到 {len(result)} 条边") + logger.info(t("console.fetchedEdges", count=len(result))) return result def get_node_detail(self, node_uuid: str) -> Optional[NodeInfo]: @@ -723,12 +723,12 @@ def get_node_detail(self, node_uuid: str) -> Optional[NodeInfo]: Returns: 节点信息或None """ - logger.info(f"获取节点详情: {node_uuid[:8]}...") + logger.info(t("console.fetchingNodeDetail", uuid=node_uuid[:8])) try: node = self._call_with_retry( 
func=lambda: self.client.graph.node.get(uuid_=node_uuid), - operation_name=f"获取节点详情(uuid={node_uuid[:8]}...)" + operation_name=t("console.fetchNodeDetailOp", uuid=node_uuid[:8]) ) if not node: @@ -742,7 +742,7 @@ def get_node_detail(self, node_uuid: str) -> Optional[NodeInfo]: attributes=node.attributes or {} ) except Exception as e: - logger.error(f"获取节点详情失败: {str(e)}") + logger.error(t("console.fetchNodeDetailFailed", error=str(e))) return None def get_node_edges(self, graph_id: str, node_uuid: str) -> List[EdgeInfo]: @@ -758,7 +758,7 @@ def get_node_edges(self, graph_id: str, node_uuid: str) -> List[EdgeInfo]: Returns: 边列表 """ - logger.info(f"获取节点 {node_uuid[:8]}... 的相关边") + logger.info(t("console.fetchingNodeEdges", uuid=node_uuid[:8])) try: # 获取图谱所有边,然后过滤 @@ -770,11 +770,11 @@ def get_node_edges(self, graph_id: str, node_uuid: str) -> List[EdgeInfo]: if edge.source_node_uuid == node_uuid or edge.target_node_uuid == node_uuid: result.append(edge) - logger.info(f"找到 {len(result)} 条与节点相关的边") + logger.info(t("console.foundNodeEdges", count=len(result))) return result except Exception as e: - logger.warning(f"获取节点边失败: {str(e)}") + logger.warning(t("console.fetchNodeEdgesFailed", error=str(e))) return [] def get_entities_by_type( @@ -792,7 +792,7 @@ def get_entities_by_type( Returns: 符合类型的实体列表 """ - logger.info(f"获取类型为 {entity_type} 的实体...") + logger.info(t("console.fetchingEntitiesByType", type=entity_type)) all_nodes = self.get_all_nodes(graph_id) @@ -802,7 +802,7 @@ def get_entities_by_type( if entity_type in node.labels: filtered.append(node) - logger.info(f"找到 {len(filtered)} 个 {entity_type} 类型的实体") + logger.info(t("console.foundEntitiesByType", count=len(filtered), type=entity_type)) return filtered def get_entity_summary( @@ -822,7 +822,7 @@ def get_entity_summary( Returns: 实体摘要信息 """ - logger.info(f"获取实体 {entity_name} 的关系摘要...") + logger.info(t("console.fetchingEntitySummary", name=entity_name)) # 先搜索该实体相关的信息 search_result = self.search_graph( @@ -862,7 
+862,7 @@ def get_graph_statistics(self, graph_id: str) -> Dict[str, Any]: Returns: 统计信息 """ - logger.info(f"获取图谱 {graph_id} 的统计信息...") + logger.info(t("console.fetchingGraphStats", graphId=graph_id)) nodes = self.get_all_nodes(graph_id) edges = self.get_all_edges(graph_id) @@ -906,7 +906,7 @@ def get_simulation_context( Returns: 模拟上下文信息 """ - logger.info(f"获取模拟上下文: {simulation_requirement[:50]}...") + logger.info(t("console.fetchingSimContext", requirement=simulation_requirement[:50])) # 搜索与模拟需求相关的信息 search_result = self.search_graph( @@ -970,7 +970,7 @@ def insight_forge( Returns: InsightForgeResult: 深度洞察检索结果 """ - logger.info(f"InsightForge 深度洞察检索: {query[:50]}...") + logger.info(t("console.insightForgeStart", query=query[:50])) result = InsightForgeResult( query=query, @@ -986,7 +986,7 @@ def insight_forge( max_queries=max_sub_queries ) result.sub_queries = sub_queries - logger.info(f"生成 {len(sub_queries)} 个子问题") + logger.info(t("console.generatedSubQueries", count=len(sub_queries))) # Step 2: 对每个子问题进行语义搜索 all_facts = [] @@ -1086,7 +1086,7 @@ def insight_forge( result.relationship_chains = relationship_chains result.total_relationships = len(relationship_chains) - logger.info(f"InsightForge完成: {result.total_facts}条事实, {result.total_entities}个实体, {result.total_relationships}条关系") + logger.info(t("console.insightForgeComplete", facts=result.total_facts, entities=result.total_entities, relationships=result.total_relationships)) return result def _generate_sub_queries( @@ -1133,7 +1133,7 @@ def _generate_sub_queries( return [str(sq) for sq in sub_queries[:max_queries]] except Exception as e: - logger.warning(f"生成子问题失败: {str(e)},使用默认子问题") + logger.warning(t("console.generateSubQueriesFailed", error=str(e))) # 降级:返回基于原问题的变体 return [ query, @@ -1168,7 +1168,7 @@ def panorama_search( Returns: PanoramaResult: 广度搜索结果 """ - logger.info(f"PanoramaSearch 广度搜索: {query[:50]}...") + logger.info(t("console.panoramaSearchStart", query=query[:50])) result = 
PanoramaResult(query=query) @@ -1231,7 +1231,7 @@ def relevance_score(fact: str) -> int: result.active_count = len(active_facts) result.historical_count = len(historical_facts) - logger.info(f"PanoramaSearch完成: {result.active_count}条有效, {result.historical_count}条历史") + logger.info(t("console.panoramaSearchComplete", active=result.active_count, historical=result.historical_count)) return result def quick_search( @@ -1256,7 +1256,7 @@ def quick_search( Returns: SearchResult: 搜索结果 """ - logger.info(f"QuickSearch 简单搜索: {query[:50]}...") + logger.info(t("console.quickSearchStart", query=query[:50])) # 直接调用现有的search_graph方法 result = self.search_graph( @@ -1266,7 +1266,7 @@ def quick_search( scope="edges" ) - logger.info(f"QuickSearch完成: {result.total_count}条结果") + logger.info(t("console.quickSearchComplete", count=result.total_count)) return result def interview_agents( @@ -1306,7 +1306,7 @@ def interview_agents( """ from .simulation_runner import SimulationRunner - logger.info(f"InterviewAgents 深度采访(真实API): {interview_requirement[:50]}...") + logger.info(t("console.interviewAgentsStart", requirement=interview_requirement[:50])) result = InterviewResult( interview_topic=interview_requirement, @@ -1317,12 +1317,12 @@ def interview_agents( profiles = self._load_agent_profiles(simulation_id) if not profiles: - logger.warning(f"未找到模拟 {simulation_id} 的人设文件") + logger.warning(t("console.profilesNotFound", simId=simulation_id)) result.summary = "未找到可采访的Agent人设文件" return result result.total_agents = len(profiles) - logger.info(f"加载到 {len(profiles)} 个Agent人设") + logger.info(t("console.loadedProfiles", count=len(profiles))) # Step 2: 使用LLM选择要采访的Agent(返回agent_id列表) selected_agents, selected_indices, selection_reasoning = self._select_agents_for_interview( @@ -1334,7 +1334,7 @@ def interview_agents( result.selected_agents = selected_agents result.selection_reasoning = selection_reasoning - logger.info(f"选择了 {len(selected_agents)} 个Agent进行采访: {selected_indices}") + 
logger.info(t("console.selectedAgentsForInterview", count=len(selected_agents), indices=selected_indices)) # Step 3: 生成采访问题(如果没有提供) if not result.interview_questions: @@ -1343,7 +1343,7 @@ def interview_agents( simulation_requirement=simulation_requirement, selected_agents=selected_agents ) - logger.info(f"生成了 {len(result.interview_questions)} 个采访问题") + logger.info(t("console.generatedInterviewQuestions", count=len(result.interview_questions))) # 将问题合并为一个采访prompt combined_prompt = "\n".join([f"{i+1}. {q}" for i, q in enumerate(result.interview_questions)]) @@ -1373,7 +1373,7 @@ def interview_agents( # 不指定platform,API会在twitter和reddit两个平台都采访 }) - logger.info(f"调用批量采访API(双平台): {len(interviews_request)} 个Agent") + logger.info(t("console.callingBatchInterviewApi", count=len(interviews_request))) # 调用 SimulationRunner 的批量采访方法(不传platform,双平台采访) api_result = SimulationRunner.interview_agents_batch( @@ -1383,12 +1383,12 @@ def interview_agents( timeout=180.0 # 双平台需要更长超时 ) - logger.info(f"采访API返回: {api_result.get('interviews_count', 0)} 个结果, success={api_result.get('success')}") + logger.info(t("console.interviewApiReturned", count=api_result.get('interviews_count', 0), success=api_result.get('success'))) # 检查API调用是否成功 if not api_result.get("success", False): error_msg = api_result.get("error", "未知错误") - logger.warning(f"采访API返回失败: {error_msg}") + logger.warning(t("console.interviewApiReturnedFailure", error=error_msg)) result.summary = f"采访API调用失败:{error_msg}。请检查OASIS模拟环境状态。" return result @@ -1461,11 +1461,11 @@ def interview_agents( except ValueError as e: # 模拟环境未运行 - logger.warning(f"采访API调用失败(环境未运行?): {e}") + logger.warning(t("console.interviewApiCallFailed", error=e)) result.summary = f"采访失败:{str(e)}。模拟环境可能已关闭,请确保OASIS环境正在运行。" return result except Exception as e: - logger.error(f"采访API调用异常: {e}") + logger.error(t("console.interviewApiCallException", error=e)) import traceback logger.error(traceback.format_exc()) result.summary = f"采访过程发生错误:{str(e)}" @@ -1478,7 +1478,7 
@@ def interview_agents( interview_requirement=interview_requirement ) - logger.info(f"InterviewAgents完成: 采访了 {result.interviewed_count} 个Agent(双平台)") + logger.info(t("console.interviewAgentsComplete", count=result.interviewed_count)) return result @staticmethod @@ -1521,10 +1521,10 @@ def _load_agent_profiles(self, simulation_id: str) -> List[Dict[str, Any]]: try: with open(reddit_profile_path, 'r', encoding='utf-8') as f: profiles = json.load(f) - logger.info(f"从 reddit_profiles.json 加载了 {len(profiles)} 个人设") + logger.info(t("console.loadedRedditProfiles", count=len(profiles))) return profiles except Exception as e: - logger.warning(f"读取 reddit_profiles.json 失败: {e}") + logger.warning(t("console.readRedditProfilesFailed", error=e)) # 尝试读取Twitter CSV格式 twitter_profile_path = os.path.join(sim_dir, "twitter_profiles.csv") @@ -1541,10 +1541,10 @@ def _load_agent_profiles(self, simulation_id: str) -> List[Dict[str, Any]]: "persona": row.get("user_char", ""), "profession": "未知" }) - logger.info(f"从 twitter_profiles.csv 加载了 {len(profiles)} 个人设") + logger.info(t("console.loadedTwitterProfiles", count=len(profiles))) return profiles except Exception as e: - logger.warning(f"读取 twitter_profiles.csv 失败: {e}") + logger.warning(t("console.readTwitterProfilesFailed", error=e)) return profiles @@ -1625,7 +1625,7 @@ def _select_agents_for_interview( return selected_agents, valid_indices, reasoning except Exception as e: - logger.warning(f"LLM选择Agent失败,使用默认选择: {e}") + logger.warning(t("console.llmSelectAgentFailed", error=e)) # 降级:选择前N个 selected = profiles[:max_agents] indices = list(range(min(max_agents, len(profiles)))) @@ -1673,7 +1673,7 @@ def _generate_interview_questions( return response.get("questions", [f"关于{interview_requirement},您有什么看法?"]) except Exception as e: - logger.warning(f"生成采访问题失败: {e}") + logger.warning(t("console.generateInterviewQuestionsFailed", error=e)) return [ f"关于{interview_requirement},您的观点是什么?", "这件事对您或您所代表的群体有什么影响?", @@ -1695,7 +1695,8 @@ def 
_generate_interview_summary( for interview in interviews: interview_texts.append(f"【{interview.agent_name}({interview.agent_role})】\n{interview.response[:500]}") - system_prompt = """你是一个专业的新闻编辑。请根据多位受访者的回答,生成一份采访摘要。 + quote_instruction = "引用受访者原话时使用中文引号「」" if get_locale() == 'zh' else 'Use quotation marks "" when quoting interviewees' + system_prompt = f"""你是一个专业的新闻编辑。请根据多位受访者的回答,生成一份采访摘要。 摘要要求: 1. 提炼各方主要观点 @@ -1708,7 +1709,7 @@ def _generate_interview_summary( - 使用纯文本段落,用空行分隔不同部分 - 不要使用Markdown标题(如#、##、###) - 不要使用分割线(如---、***) -- 引用受访者原话时使用中文引号「」 +- {quote_instruction} - 可以使用**加粗**标记关键词,但不要使用其他Markdown语法""" user_prompt = f"""采访主题:{interview_requirement} @@ -1730,6 +1731,6 @@ def _generate_interview_summary( return summary except Exception as e: - logger.warning(f"生成采访摘要失败: {e}") + logger.warning(t("console.generateInterviewSummaryFailed", error=e)) # 降级:简单拼接 return f"共采访了{len(interviews)}位受访者,包括:" + "、".join([i.agent_name for i in interviews]) diff --git a/backend/app/utils/__init__.py b/backend/app/utils/__init__.py index 5848792b8..e70161acb 100644 --- a/backend/app/utils/__init__.py +++ b/backend/app/utils/__init__.py @@ -4,6 +4,7 @@ from .file_parser import FileParser from .llm_client import LLMClient +from .locale import t, get_locale, set_locale, get_language_instruction -__all__ = ['FileParser', 'LLMClient'] +__all__ = ['FileParser', 'LLMClient', 't', 'get_locale', 'set_locale', 'get_language_instruction'] diff --git a/backend/app/utils/locale.py b/backend/app/utils/locale.py new file mode 100644 index 000000000..23d04aa9d --- /dev/null +++ b/backend/app/utils/locale.py @@ -0,0 +1,69 @@ +import json +import os +import threading +from flask import request, has_request_context + +_thread_local = threading.local() + +_locales_dir = os.path.join(os.path.dirname(__file__), '..', '..', '..', 'locales') + +# Load language registry +with open(os.path.join(_locales_dir, 'languages.json'), 'r', encoding='utf-8') as f: + _languages = json.load(f) + +# Load 
translation files +_translations = {} +for filename in os.listdir(_locales_dir): + if filename.endswith('.json') and filename != 'languages.json': + locale_name = filename[:-5] + with open(os.path.join(_locales_dir, filename), 'r', encoding='utf-8') as f: + _translations[locale_name] = json.load(f) + + +def set_locale(locale: str): + """Set locale for current thread. Call at the start of background threads.""" + _thread_local.locale = locale + + +def get_locale() -> str: + if has_request_context(): + raw = request.headers.get('Accept-Language', 'zh').split(',')[0].split(';')[0].split('-')[0].strip().lower() + return raw if raw in _translations else 'zh' + return getattr(_thread_local, 'locale', 'zh') + + +def t(key: str, **kwargs) -> str: + locale = get_locale() + messages = _translations.get(locale, _translations.get('zh', {})) + + value = messages + for part in key.split('.'): + if isinstance(value, dict): + value = value.get(part) + else: + value = None + break + + if not isinstance(value, str): + value = _translations.get('zh', {}) + for part in key.split('.'): + if isinstance(value, dict): + value = value.get(part) + else: + value = None + break + + if not isinstance(value, str): + return key + + if kwargs: + for k, v in kwargs.items(): + value = value.replace(f'{{{k}}}', str(v)) + + return value + + +def get_language_instruction() -> str: + locale = get_locale() + lang_config = _languages.get(locale, _languages.get('zh', {})) + return lang_config.get('llmInstruction', '请使用中文回答。') diff --git a/frontend/index.html index 009c924a4..0b80095c6 100644 --- a/frontend/index.html +++ b/frontend/index.html @@ -1,6 +1,7 @@ - +
+ diff --git a/frontend/package-lock.json b/frontend/package-lock.json index 8c4fa710d..63d375502 100644 --- a/frontend/package-lock.json +++ b/frontend/package-lock.json @@ -11,6 +11,7 @@ "axios": "^1.13.2", "d3": "^7.9.0", "vue": "^3.5.24", + "vue-i18n": "^11.3.0", "vue-router": "^4.6.3" }, "devDependencies": { @@ -506,6 +507,67 @@ "node": ">=18" } }, + "node_modules/@intlify/core-base": { + "version": "11.3.0", + "resolved": "https://registry.npmjs.org/@intlify/core-base/-/core-base-11.3.0.tgz", + "integrity": "sha512-NNX5jIwF4TJBe7RtSKDMOA6JD9mp2mRcBHAwt2X+Q8PvnZub0yj5YYXlFu2AcESdgQpEv/5Yx2uOCV/yh7YkZg==", + "license": "MIT", + "dependencies": { + "@intlify/devtools-types": "11.3.0", + "@intlify/message-compiler": "11.3.0", + "@intlify/shared": "11.3.0" + }, + "engines": { + "node": ">= 16" + }, + "funding": { + "url": "https://github.com/sponsors/kazupon" + } + }, + "node_modules/@intlify/devtools-types": { + "version": "11.3.0", + "resolved": "https://registry.npmjs.org/@intlify/devtools-types/-/devtools-types-11.3.0.tgz", + "integrity": "sha512-G9CNL4WpANWVdUjubOIIS7/D2j/0j+1KJmhBJxHilWNKr9mmt3IjFV3Hq4JoBP23uOoC5ynxz/FHZ42M+YxfGw==", + "license": "MIT", + "dependencies": { + "@intlify/core-base": "11.3.0", + "@intlify/shared": "11.3.0" + }, + "engines": { + "node": ">= 16" + }, + "funding": { + "url": "https://github.com/sponsors/kazupon" + } + }, + "node_modules/@intlify/message-compiler": { + "version": "11.3.0", + "resolved": "https://registry.npmjs.org/@intlify/message-compiler/-/message-compiler-11.3.0.tgz", + "integrity": "sha512-RAJp3TMsqohg/Wa7bVF3cChRhecSYBLrTCQSj7j0UtWVFLP+6iEJoE2zb7GU5fp+fmG5kCbUdzhmlAUCWXiUJw==", + "license": "MIT", + "dependencies": { + "@intlify/shared": "11.3.0", + "source-map-js": "^1.0.2" + }, + "engines": { + "node": ">= 16" + }, + "funding": { + "url": "https://github.com/sponsors/kazupon" + } + }, + "node_modules/@intlify/shared": { + "version": "11.3.0", + "resolved": 
"https://registry.npmjs.org/@intlify/shared/-/shared-11.3.0.tgz", + "integrity": "sha512-LC6P/uay7rXL5zZ5+5iRJfLs/iUN8apu9tm8YqQVmW3Uq3X4A0dOFUIDuAmB7gAC29wTHOS3EiN/IosNSz0eNQ==", + "license": "MIT", + "engines": { + "node": ">= 16" + }, + "funding": { + "url": "https://github.com/sponsors/kazupon" + } + }, "node_modules/@jridgewell/sourcemap-codec": { "version": "1.5.5", "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.5.5.tgz", @@ -2035,6 +2097,27 @@ } } }, + "node_modules/vue-i18n": { + "version": "11.3.0", + "resolved": "https://registry.npmjs.org/vue-i18n/-/vue-i18n-11.3.0.tgz", + "integrity": "sha512-1J+xDfDJTLhDxElkd3+XUhT7FYSZd2b8pa7IRKGxhWH/8yt6PTvi3xmWhGwhYT5EaXdatui11pF2R6tL73/zPA==", + "license": "MIT", + "dependencies": { + "@intlify/core-base": "11.3.0", + "@intlify/devtools-types": "11.3.0", + "@intlify/shared": "11.3.0", + "@vue/devtools-api": "^6.5.0" + }, + "engines": { + "node": ">= 16" + }, + "funding": { + "url": "https://github.com/sponsors/kazupon" + }, + "peerDependencies": { + "vue": "^3.0.0" + } + }, "node_modules/vue-router": { "version": "4.6.3", "resolved": "https://registry.npmjs.org/vue-router/-/vue-router-4.6.3.tgz", diff --git a/frontend/package.json b/frontend/package.json index f7e995a14..36ba639b5 100644 --- a/frontend/package.json +++ b/frontend/package.json @@ -12,6 +12,7 @@ "axios": "^1.13.2", "d3": "^7.9.0", "vue": "^3.5.24", + "vue-i18n": "^11.3.0", "vue-router": "^4.6.3" }, "devDependencies": { diff --git a/frontend/src/api/index.js b/frontend/src/api/index.js index e2d9465b2..e840e1166 100644 --- a/frontend/src/api/index.js +++ b/frontend/src/api/index.js @@ -1,4 +1,5 @@ import axios from 'axios' +import i18n from '../i18n' // 创建axios实例 const service = axios.create({ @@ -12,6 +13,7 @@ const service = axios.create({ // 请求拦截器 service.interceptors.request.use( config => { + config.headers['Accept-Language'] = i18n.global.locale.value return config }, error => { diff --git 
a/frontend/src/components/GraphPanel.vue b/frontend/src/components/GraphPanel.vue index 314c966e4..db1882982 100644 --- a/frontend/src/components/GraphPanel.vue +++ b/frontend/src/components/GraphPanel.vue @@ -1,14 +1,14 @@POST /api/graph/ontology/generate
- LLM分析文档内容与模拟需求,提取出现实种子,自动生成合适的本体结构 + {{ $t('step1.ontologyDesc') }}
POST /api/graph/build
- 基于生成的本体,将文档自动分块后调用 Zep 构建知识图谱,提取实体和关系,并形成时序记忆与社区摘要 + {{ $t('step1.graphRagDesc') }}
POST /api/simulation/create
-图谱构建已完成,请进入下一步进行模拟环境搭建
+{{ $t('step1.buildCompleteDesc') }}