diff --git a/.env.example b/.env.example
index 78a3b72c0..bd8694118 100644
--- a/.env.example
+++ b/.env.example
@@ -1,10 +1,15 @@
-# LLM API配置(支持 OpenAI SDK 格式的任意 LLM API)
-# 推荐使用阿里百炼平台qwen-plus模型:https://bailian.console.aliyun.com/
-# 注意消耗较大,可先进行小于40轮的模拟尝试
+# LLM API Configuration (any OpenAI-SDK-compatible API)
+# Recommended: Alibaba qwen-plus via Bailian Platform (cost-effective for high volume):
+# https://bailian.console.aliyun.com/
+# Also works with: OpenAI (gpt-4o-mini), Anthropic (via compatible proxy), etc.
+# Note: token usage is high — start with fewer than 40 simulation rounds.
LLM_API_KEY=your_api_key_here
LLM_BASE_URL=https://dashscope.aliyuncs.com/compatible-mode/v1
LLM_MODEL_NAME=qwen-plus
+# CORS — comma-separated allowed origins (set to your frontend domain in production)
+CORS_ORIGINS=http://localhost:5173,http://localhost:5001
+
# ===== ZEP记忆图谱配置 =====
# 每月免费额度即可支撑简单使用:https://app.getzep.com/
ZEP_API_KEY=your_zep_api_key_here
diff --git a/backend/app/__init__.py b/backend/app/__init__.py
index aba624bba..08984919d 100644
--- a/backend/app/__init__.py
+++ b/backend/app/__init__.py
@@ -39,8 +39,8 @@ def create_app(config_class=Config):
logger.info("MiroFish Backend 启动中...")
logger.info("=" * 50)
- # 启用CORS
- CORS(app, resources={r"/api/*": {"origins": "*"}})
+ # 启用CORS — 限制为配置的允许来源,而非通配符 "*"
+ CORS(app, resources={r"/api/*": {"origins": config_class.CORS_ORIGINS}})
# 注册模拟进程清理函数(确保服务器关闭时终止所有模拟进程)
from .services.simulation_runner import SimulationRunner
diff --git a/backend/app/api/graph.py b/backend/app/api/graph.py
index 12ff1ba2d..038ea990d 100644
--- a/backend/app/api/graph.py
+++ b/backend/app/api/graph.py
@@ -249,8 +249,7 @@ def generate_ontology():
except Exception as e:
return jsonify({
"success": False,
- "error": str(e),
- "traceback": traceback.format_exc()
+ "error": str(e)
}), 500
@@ -489,7 +488,7 @@ def wait_progress_callback(msg, progress_ratio):
except Exception as e:
# 更新项目状态为失败
- build_logger.error(f"[{task_id}] 图谱构建失败: {str(e)}")
+ build_logger.error(f"[{task_id}] 图谱构建失败: {str(e)}", exc_info=True)
build_logger.debug(traceback.format_exc())
project.status = ProjectStatus.FAILED
@@ -519,8 +518,7 @@ def wait_progress_callback(msg, progress_ratio):
except Exception as e:
return jsonify({
"success": False,
- "error": str(e),
- "traceback": traceback.format_exc()
+ "error": str(e)
}), 500
@@ -584,8 +582,7 @@ def get_graph_data(graph_id: str):
except Exception as e:
return jsonify({
"success": False,
- "error": str(e),
- "traceback": traceback.format_exc()
+ "error": str(e)
}), 500
@@ -612,6 +609,5 @@ def delete_graph(graph_id: str):
except Exception as e:
return jsonify({
"success": False,
- "error": str(e),
- "traceback": traceback.format_exc()
+ "error": str(e)
}), 500
diff --git a/backend/app/api/report.py b/backend/app/api/report.py
index e05c73c39..e0c6dfe4f 100644
--- a/backend/app/api/report.py
+++ b/backend/app/api/report.py
@@ -4,7 +4,6 @@
"""
import os
-import traceback
import threading
from flask import request, jsonify, send_file
@@ -167,7 +166,7 @@ def progress_callback(stage, progress, message):
task_manager.fail_task(task_id, report.error or "报告生成失败")
except Exception as e:
- logger.error(f"报告生成失败: {str(e)}")
+ logger.error(f"报告生成失败: {str(e)}", exc_info=True)
task_manager.fail_task(task_id, str(e))
# 启动后台线程
@@ -187,11 +186,10 @@ def progress_callback(stage, progress, message):
})
except Exception as e:
- logger.error(f"启动报告生成任务失败: {str(e)}")
+ logger.error(f"启动报告生成任务失败: {str(e)}", exc_info=True)
return jsonify({
"success": False,
- "error": str(e),
- "traceback": traceback.format_exc()
+ "error": str(e)
}), 500
@@ -260,7 +258,7 @@ def get_generate_status():
})
except Exception as e:
- logger.error(f"查询任务状态失败: {str(e)}")
+ logger.error(f"查询任务状态失败: {str(e)}", exc_info=True)
return jsonify({
"success": False,
"error": str(e)
@@ -303,11 +301,10 @@ def get_report(report_id: str):
})
except Exception as e:
- logger.error(f"获取报告失败: {str(e)}")
+ logger.error(f"获取报告失败: {str(e)}", exc_info=True)
return jsonify({
"success": False,
- "error": str(e),
- "traceback": traceback.format_exc()
+ "error": str(e)
}), 500
@@ -342,11 +339,10 @@ def get_report_by_simulation(simulation_id: str):
})
except Exception as e:
- logger.error(f"获取报告失败: {str(e)}")
+ logger.error(f"获取报告失败: {str(e)}", exc_info=True)
return jsonify({
"success": False,
- "error": str(e),
- "traceback": traceback.format_exc()
+ "error": str(e)
}), 500
@@ -382,11 +378,10 @@ def list_reports():
})
except Exception as e:
- logger.error(f"列出报告失败: {str(e)}")
+ logger.error(f"列出报告失败: {str(e)}", exc_info=True)
return jsonify({
"success": False,
- "error": str(e),
- "traceback": traceback.format_exc()
+ "error": str(e)
}), 500
@@ -428,11 +423,10 @@ def download_report(report_id: str):
)
except Exception as e:
- logger.error(f"下载报告失败: {str(e)}")
+ logger.error(f"下载报告失败: {str(e)}", exc_info=True)
return jsonify({
"success": False,
- "error": str(e),
- "traceback": traceback.format_exc()
+ "error": str(e)
}), 500
@@ -454,11 +448,10 @@ def delete_report(report_id: str):
})
except Exception as e:
- logger.error(f"删除报告失败: {str(e)}")
+ logger.error(f"删除报告失败: {str(e)}", exc_info=True)
return jsonify({
"success": False,
- "error": str(e),
- "traceback": traceback.format_exc()
+ "error": str(e)
}), 500
@@ -551,11 +544,10 @@ def chat_with_report_agent():
})
except Exception as e:
- logger.error(f"对话失败: {str(e)}")
+ logger.error(f"对话失败: {str(e)}", exc_info=True)
return jsonify({
"success": False,
- "error": str(e),
- "traceback": traceback.format_exc()
+ "error": str(e)
}), 500
@@ -594,11 +586,10 @@ def get_report_progress(report_id: str):
})
except Exception as e:
- logger.error(f"获取报告进度失败: {str(e)}")
+ logger.error(f"获取报告进度失败: {str(e)}", exc_info=True)
return jsonify({
"success": False,
- "error": str(e),
- "traceback": traceback.format_exc()
+ "error": str(e)
}), 500
@@ -645,11 +636,10 @@ def get_report_sections(report_id: str):
})
except Exception as e:
- logger.error(f"获取章节列表失败: {str(e)}")
+ logger.error(f"获取章节列表失败: {str(e)}", exc_info=True)
return jsonify({
"success": False,
- "error": str(e),
- "traceback": traceback.format_exc()
+ "error": str(e)
}), 500
@@ -689,11 +679,10 @@ def get_single_section(report_id: str, section_index: int):
})
except Exception as e:
- logger.error(f"获取章节内容失败: {str(e)}")
+ logger.error(f"获取章节内容失败: {str(e)}", exc_info=True)
return jsonify({
"success": False,
- "error": str(e),
- "traceback": traceback.format_exc()
+ "error": str(e)
}), 500
@@ -740,11 +729,10 @@ def check_report_status(simulation_id: str):
})
except Exception as e:
- logger.error(f"检查报告状态失败: {str(e)}")
+ logger.error(f"检查报告状态失败: {str(e)}", exc_info=True)
return jsonify({
"success": False,
- "error": str(e),
- "traceback": traceback.format_exc()
+ "error": str(e)
}), 500
@@ -801,11 +789,10 @@ def get_agent_log(report_id: str):
})
except Exception as e:
- logger.error(f"获取Agent日志失败: {str(e)}")
+ logger.error(f"获取Agent日志失败: {str(e)}", exc_info=True)
return jsonify({
"success": False,
- "error": str(e),
- "traceback": traceback.format_exc()
+ "error": str(e)
}), 500
@@ -835,11 +822,10 @@ def stream_agent_log(report_id: str):
})
except Exception as e:
- logger.error(f"获取Agent日志失败: {str(e)}")
+ logger.error(f"获取Agent日志失败: {str(e)}", exc_info=True)
return jsonify({
"success": False,
- "error": str(e),
- "traceback": traceback.format_exc()
+ "error": str(e)
}), 500
@@ -883,11 +869,10 @@ def get_console_log(report_id: str):
})
except Exception as e:
- logger.error(f"获取控制台日志失败: {str(e)}")
+ logger.error(f"获取控制台日志失败: {str(e)}", exc_info=True)
return jsonify({
"success": False,
- "error": str(e),
- "traceback": traceback.format_exc()
+ "error": str(e)
}), 500
@@ -917,11 +902,10 @@ def stream_console_log(report_id: str):
})
except Exception as e:
- logger.error(f"获取控制台日志失败: {str(e)}")
+ logger.error(f"获取控制台日志失败: {str(e)}", exc_info=True)
return jsonify({
"success": False,
- "error": str(e),
- "traceback": traceback.format_exc()
+ "error": str(e)
}), 500
@@ -967,11 +951,10 @@ def search_graph_tool():
})
except Exception as e:
- logger.error(f"图谱搜索失败: {str(e)}")
+ logger.error(f"图谱搜索失败: {str(e)}", exc_info=True)
return jsonify({
"success": False,
- "error": str(e),
- "traceback": traceback.format_exc()
+ "error": str(e)
}), 500
@@ -1007,9 +990,8 @@ def get_graph_statistics_tool():
})
except Exception as e:
- logger.error(f"获取图谱统计失败: {str(e)}")
+ logger.error(f"获取图谱统计失败: {str(e)}", exc_info=True)
return jsonify({
"success": False,
- "error": str(e),
- "traceback": traceback.format_exc()
+ "error": str(e)
}), 500
diff --git a/backend/app/api/simulation.py b/backend/app/api/simulation.py
index 3a0f68168..d3a094ffd 100644
--- a/backend/app/api/simulation.py
+++ b/backend/app/api/simulation.py
@@ -4,7 +4,6 @@
"""
import os
-import traceback
from flask import request, jsonify, send_file
from . import simulation_bp
@@ -81,11 +80,10 @@ def get_graph_entities(graph_id: str):
})
except Exception as e:
- logger.error(f"获取图谱实体失败: {str(e)}")
+ logger.error(f"获取图谱实体失败: {str(e)}", exc_info=True)
return jsonify({
"success": False,
- "error": str(e),
- "traceback": traceback.format_exc()
+ "error": str(e)
}), 500
@@ -114,11 +112,10 @@ def get_entity_detail(graph_id: str, entity_uuid: str):
})
except Exception as e:
- logger.error(f"获取实体详情失败: {str(e)}")
+ logger.error(f"获取实体详情失败: {str(e)}", exc_info=True)
return jsonify({
"success": False,
- "error": str(e),
- "traceback": traceback.format_exc()
+ "error": str(e)
}), 500
@@ -151,11 +148,10 @@ def get_entities_by_type(graph_id: str, entity_type: str):
})
except Exception as e:
- logger.error(f"获取实体失败: {str(e)}")
+ logger.error(f"获取实体失败: {str(e)}", exc_info=True)
return jsonify({
"success": False,
- "error": str(e),
- "traceback": traceback.format_exc()
+ "error": str(e)
}), 500
@@ -228,11 +224,10 @@ def create_simulation():
})
except Exception as e:
- logger.error(f"创建模拟失败: {str(e)}")
+ logger.error(f"创建模拟失败: {str(e)}", exc_info=True)
return jsonify({
"success": False,
- "error": str(e),
- "traceback": traceback.format_exc()
+ "error": str(e)
}), 500
@@ -592,7 +587,7 @@ def progress_callback(stage, progress, message, **kwargs):
)
except Exception as e:
- logger.error(f"准备模拟失败: {str(e)}")
+ logger.error(f"准备模拟失败: {str(e)}", exc_info=True)
task_manager.fail_task(task_id, str(e))
# 更新模拟状态为失败
@@ -626,11 +621,10 @@ def progress_callback(stage, progress, message, **kwargs):
}), 404
except Exception as e:
- logger.error(f"启动准备任务失败: {str(e)}")
+ logger.error(f"启动准备任务失败: {str(e)}", exc_info=True)
return jsonify({
"success": False,
- "error": str(e),
- "traceback": traceback.format_exc()
+ "error": str(e)
}), 500
@@ -740,7 +734,7 @@ def get_prepare_status():
})
except Exception as e:
- logger.error(f"查询任务状态失败: {str(e)}")
+ logger.error(f"查询任务状态失败: {str(e)}", exc_info=True)
return jsonify({
"success": False,
"error": str(e)
@@ -772,11 +766,10 @@ def get_simulation(simulation_id: str):
})
except Exception as e:
- logger.error(f"获取模拟状态失败: {str(e)}")
+ logger.error(f"获取模拟状态失败: {str(e)}", exc_info=True)
return jsonify({
"success": False,
- "error": str(e),
- "traceback": traceback.format_exc()
+ "error": str(e)
}), 500
@@ -801,11 +794,10 @@ def list_simulations():
})
except Exception as e:
- logger.error(f"列出模拟失败: {str(e)}")
+ logger.error(f"列出模拟失败: {str(e)}", exc_info=True)
return jsonify({
"success": False,
- "error": str(e),
- "traceback": traceback.format_exc()
+ "error": str(e)
}), 500
@@ -974,11 +966,10 @@ def get_simulation_history():
})
except Exception as e:
- logger.error(f"获取历史模拟失败: {str(e)}")
+ logger.error(f"获取历史模拟失败: {str(e)}", exc_info=True)
return jsonify({
"success": False,
- "error": str(e),
- "traceback": traceback.format_exc()
+ "error": str(e)
}), 500
@@ -1012,11 +1003,10 @@ def get_simulation_profiles(simulation_id: str):
}), 404
except Exception as e:
- logger.error(f"获取Profile失败: {str(e)}")
+ logger.error(f"获取Profile失败: {str(e)}", exc_info=True)
return jsonify({
"success": False,
- "error": str(e),
- "traceback": traceback.format_exc()
+ "error": str(e)
}), 500
@@ -1122,11 +1112,10 @@ def get_simulation_profiles_realtime(simulation_id: str):
})
except Exception as e:
- logger.error(f"实时获取Profile失败: {str(e)}")
+ logger.error(f"实时获取Profile失败: {str(e)}", exc_info=True)
return jsonify({
"success": False,
- "error": str(e),
- "traceback": traceback.format_exc()
+ "error": str(e)
}), 500
@@ -1242,11 +1231,10 @@ def get_simulation_config_realtime(simulation_id: str):
})
except Exception as e:
- logger.error(f"实时获取Config失败: {str(e)}")
+ logger.error(f"实时获取Config失败: {str(e)}", exc_info=True)
return jsonify({
"success": False,
- "error": str(e),
- "traceback": traceback.format_exc()
+ "error": str(e)
}), 500
@@ -1278,11 +1266,10 @@ def get_simulation_config(simulation_id: str):
})
except Exception as e:
- logger.error(f"获取配置失败: {str(e)}")
+ logger.error(f"获取配置失败: {str(e)}", exc_info=True)
return jsonify({
"success": False,
- "error": str(e),
- "traceback": traceback.format_exc()
+ "error": str(e)
}), 500
@@ -1307,11 +1294,10 @@ def download_simulation_config(simulation_id: str):
)
except Exception as e:
- logger.error(f"下载配置失败: {str(e)}")
+ logger.error(f"下载配置失败: {str(e)}", exc_info=True)
return jsonify({
"success": False,
- "error": str(e),
- "traceback": traceback.format_exc()
+ "error": str(e)
}), 500
@@ -1359,11 +1345,10 @@ def download_simulation_script(script_name: str):
)
except Exception as e:
- logger.error(f"下载脚本失败: {str(e)}")
+ logger.error(f"下载脚本失败: {str(e)}", exc_info=True)
return jsonify({
"success": False,
- "error": str(e),
- "traceback": traceback.format_exc()
+ "error": str(e)
}), 500
@@ -1433,11 +1418,10 @@ def generate_profiles():
})
except Exception as e:
- logger.error(f"生成Profile失败: {str(e)}")
+ logger.error(f"生成Profile失败: {str(e)}", exc_info=True)
return jsonify({
"success": False,
- "error": str(e),
- "traceback": traceback.format_exc()
+ "error": str(e)
}), 500
@@ -1550,7 +1534,7 @@ def start_simulation():
try:
SimulationRunner.stop_simulation(simulation_id)
except Exception as e:
- logger.warning(f"停止模拟时出现警告: {str(e)}")
+ logger.warning(f"停止模拟时出现警告: {str(e)}", exc_info=True)
else:
return jsonify({
"success": False,
@@ -1628,11 +1612,10 @@ def start_simulation():
}), 400
except Exception as e:
- logger.error(f"启动模拟失败: {str(e)}")
+ logger.error(f"启动模拟失败: {str(e)}", exc_info=True)
return jsonify({
"success": False,
- "error": str(e),
- "traceback": traceback.format_exc()
+ "error": str(e)
}), 500
@@ -1687,11 +1670,10 @@ def stop_simulation():
}), 400
except Exception as e:
- logger.error(f"停止模拟失败: {str(e)}")
+ logger.error(f"停止模拟失败: {str(e)}", exc_info=True)
return jsonify({
"success": False,
- "error": str(e),
- "traceback": traceback.format_exc()
+ "error": str(e)
}), 500
@@ -1747,11 +1729,10 @@ def get_run_status(simulation_id: str):
})
except Exception as e:
- logger.error(f"获取运行状态失败: {str(e)}")
+ logger.error(f"获取运行状态失败: {str(e)}", exc_info=True)
return jsonify({
"success": False,
- "error": str(e),
- "traceback": traceback.format_exc()
+ "error": str(e)
}), 500
@@ -1848,11 +1829,10 @@ def get_run_status_detail(simulation_id: str):
})
except Exception as e:
- logger.error(f"获取详细状态失败: {str(e)}")
+ logger.error(f"获取详细状态失败: {str(e)}", exc_info=True)
return jsonify({
"success": False,
- "error": str(e),
- "traceback": traceback.format_exc()
+ "error": str(e)
}), 500
@@ -1902,11 +1882,10 @@ def get_simulation_actions(simulation_id: str):
})
except Exception as e:
- logger.error(f"获取动作历史失败: {str(e)}")
+ logger.error(f"获取动作历史失败: {str(e)}", exc_info=True)
return jsonify({
"success": False,
- "error": str(e),
- "traceback": traceback.format_exc()
+ "error": str(e)
}), 500
@@ -1942,11 +1921,10 @@ def get_simulation_timeline(simulation_id: str):
})
except Exception as e:
- logger.error(f"获取时间线失败: {str(e)}")
+ logger.error(f"获取时间线失败: {str(e)}", exc_info=True)
return jsonify({
"success": False,
- "error": str(e),
- "traceback": traceback.format_exc()
+ "error": str(e)
}), 500
@@ -1969,11 +1947,10 @@ def get_agent_stats(simulation_id: str):
})
except Exception as e:
- logger.error(f"获取Agent统计失败: {str(e)}")
+ logger.error(f"获取Agent统计失败: {str(e)}", exc_info=True)
return jsonify({
"success": False,
- "error": str(e),
- "traceback": traceback.format_exc()
+ "error": str(e)
}), 500
@@ -2049,11 +2026,10 @@ def get_simulation_posts(simulation_id: str):
})
except Exception as e:
- logger.error(f"获取帖子失败: {str(e)}")
+ logger.error(f"获取帖子失败: {str(e)}", exc_info=True)
return jsonify({
"success": False,
- "error": str(e),
- "traceback": traceback.format_exc()
+ "error": str(e)
}), 500
@@ -2124,11 +2100,10 @@ def get_simulation_comments(simulation_id: str):
})
except Exception as e:
- logger.error(f"获取评论失败: {str(e)}")
+ logger.error(f"获取评论失败: {str(e)}", exc_info=True)
return jsonify({
"success": False,
- "error": str(e),
- "traceback": traceback.format_exc()
+ "error": str(e)
}), 500
@@ -2255,11 +2230,10 @@ def interview_agent():
}), 504
except Exception as e:
- logger.error(f"Interview失败: {str(e)}")
+ logger.error(f"Interview失败: {str(e)}", exc_info=True)
return jsonify({
"success": False,
- "error": str(e),
- "traceback": traceback.format_exc()
+ "error": str(e)
}), 500
@@ -2393,11 +2367,10 @@ def interview_agents_batch():
}), 504
except Exception as e:
- logger.error(f"批量Interview失败: {str(e)}")
+ logger.error(f"批量Interview失败: {str(e)}", exc_info=True)
return jsonify({
"success": False,
- "error": str(e),
- "traceback": traceback.format_exc()
+ "error": str(e)
}), 500
@@ -2496,11 +2469,10 @@ def interview_all_agents():
}), 504
except Exception as e:
- logger.error(f"全局Interview失败: {str(e)}")
+ logger.error(f"全局Interview失败: {str(e)}", exc_info=True)
return jsonify({
"success": False,
- "error": str(e),
- "traceback": traceback.format_exc()
+ "error": str(e)
}), 500
@@ -2568,11 +2540,10 @@ def get_interview_history():
})
except Exception as e:
- logger.error(f"获取Interview历史失败: {str(e)}")
+ logger.error(f"获取Interview历史失败: {str(e)}", exc_info=True)
return jsonify({
"success": False,
- "error": str(e),
- "traceback": traceback.format_exc()
+ "error": str(e)
}), 500
@@ -2633,11 +2604,10 @@ def get_env_status():
})
except Exception as e:
- logger.error(f"获取环境状态失败: {str(e)}")
+ logger.error(f"获取环境状态失败: {str(e)}", exc_info=True)
return jsonify({
"success": False,
- "error": str(e),
- "traceback": traceback.format_exc()
+ "error": str(e)
}), 500
@@ -2703,9 +2673,8 @@ def close_simulation_env():
}), 400
except Exception as e:
- logger.error(f"关闭环境失败: {str(e)}")
+ logger.error(f"关闭环境失败: {str(e)}", exc_info=True)
return jsonify({
"success": False,
- "error": str(e),
- "traceback": traceback.format_exc()
+ "error": str(e)
}), 500
diff --git a/backend/app/config.py b/backend/app/config.py
index 953dfa50a..1504a42c4 100644
--- a/backend/app/config.py
+++ b/backend/app/config.py
@@ -22,7 +22,15 @@ class Config:
# Flask配置
SECRET_KEY = os.environ.get('SECRET_KEY', 'mirofish-secret-key')
- DEBUG = os.environ.get('FLASK_DEBUG', 'True').lower() == 'true'
+ # 生产环境默认关闭 DEBUG,避免暴露内部信息
+ DEBUG = os.environ.get('FLASK_DEBUG', 'False').lower() == 'true'
+
+ # CORS配置 — 逗号分隔的允许来源,生产环境应设置为具体域名
+ CORS_ORIGINS = [
+ o.strip()
+ for o in os.environ.get('CORS_ORIGINS', 'http://localhost:5173,http://localhost:5001').split(',')
+ if o.strip()
+ ]
# JSON配置 - 禁用ASCII转义,让中文直接显示(而不是 \uXXXX 格式)
JSON_AS_ASCII = False
diff --git a/backend/app/services/oasis_profile_generator.py b/backend/app/services/oasis_profile_generator.py
index 57836c539..33104840d 100644
--- a/backend/app/services/oasis_profile_generator.py
+++ b/backend/app/services/oasis_profile_generator.py
@@ -669,8 +669,14 @@ def fix_string_newlines(match):
}
def _get_system_prompt(self, is_individual: bool) -> str:
- """获取系统提示词"""
- base_prompt = "你是社交媒体用户画像生成专家。生成详细、真实的人设用于舆论模拟,最大程度还原已有现实情况。必须返回有效的JSON格式,所有字符串值不能包含未转义的换行符。使用中文。"
+ """Get system prompt for profile generation"""
+ base_prompt = (
+ "You are an expert social media persona designer. "
+ "Generate detailed, realistic profiles for multi-agent simulations. "
+ "Maximize fidelity to the known real-world situation of each entity. "
+ "You must return valid JSON only. All string values must not contain unescaped newlines. "
+ "Write all content in English."
+ )
return base_prompt
def _build_individual_persona_prompt(
@@ -686,40 +692,40 @@ def _build_individual_persona_prompt(
-attrs_str = json.dumps(entity_attributes, ensure_ascii=False) if entity_attributes else "无"
-context_str = context[:3000] if context else "无额外上下文"
+attrs_str = json.dumps(entity_attributes, ensure_ascii=False) if entity_attributes else "None"
+context_str = context[:3000] if context else "No additional context"
- return f"""为实体生成详细的社交媒体用户人设,最大程度还原已有现实情况。
+ return f"""Generate a detailed social media persona for this entity, faithfully reflecting known real-world information.
-实体名称: {entity_name}
-实体类型: {entity_type}
-实体摘要: {entity_summary}
-实体属性: {attrs_str}
+Entity name: {entity_name}
+Entity type: {entity_type}
+Entity summary: {entity_summary}
+Entity attributes: {attrs_str}
-上下文信息:
+Context:
{context_str}
-请生成JSON,包含以下字段:
+Return a JSON object with the following fields:
-1. bio: 社交媒体简介,200字
-2. persona: 详细人设描述(2000字的纯文本),需包含:
- - 基本信息(年龄、职业、教育背景、所在地)
- - 人物背景(重要经历、与事件的关联、社会关系)
- - 性格特征(MBTI类型、核心性格、情绪表达方式)
- - 社交媒体行为(发帖频率、内容偏好、互动风格、语言特点)
- - 立场观点(对话题的态度、可能被激怒/感动的内容)
- - 独特特征(口头禅、特殊经历、个人爱好)
- - 个人记忆(人设的重要部分,要介绍这个个体与事件的关联,以及这个个体在事件中的已有动作与反应)
-3. age: 年龄数字(必须是整数)
-4. gender: 性别,必须是英文: "male" 或 "female"
-5. mbti: MBTI类型(如INTJ、ENFP等)
-6. country: 国家(使用中文,如"中国")
-7. profession: 职业
-8. interested_topics: 感兴趣话题数组
+1. bio: Social media profile bio, ~200 words
+2. persona: Detailed persona description (~2000 words, plain text), covering:
+ - Background (age, profession, education, location/country)
+ - History (key experiences, relationship to the simulated event, social connections)
+ - Personality (MBTI type, core traits, emotional expression style)
+ - Social media behavior (posting frequency, content preferences, interaction style, language tone)
+ - Stance and views (position on key topics, what would provoke or move them)
+ - Distinctive traits (catchphrases, unique experiences, personal interests)
+ - Personal memory (how this individual is connected to the event, any prior actions or reactions they have taken)
+3. age: Integer age
+4. gender: Must be English: "male", "female", or "other"
+5. mbti: MBTI type (e.g. INTJ, ENFP)
+6. country: Country name in English (e.g. "United States", "Indonesia", "Iran")
+7. profession: Occupation or role
+8. interested_topics: Array of topic strings this persona follows
-重要:
-- 所有字段值必须是字符串或数字,不要使用换行符
-- persona必须是一段连贯的文字描述
-- 使用中文(除了gender字段必须用英文male/female)
-- 内容要与实体信息保持一致
-- age必须是有效的整数,gender必须是"male"或"female"
+Requirements:
+- All field values must be strings or numbers — no unescaped newlines
+- persona must be a single coherent paragraph of flowing prose
+- Write everything in English
+- Content must be consistent with the entity information above
+- age must be a valid integer; gender must be "male", "female", or "other"
"""
def _build_group_persona_prompt(
@@ -735,38 +741,38 @@ def _build_group_persona_prompt(
-attrs_str = json.dumps(entity_attributes, ensure_ascii=False) if entity_attributes else "无"
-context_str = context[:3000] if context else "无额外上下文"
+attrs_str = json.dumps(entity_attributes, ensure_ascii=False) if entity_attributes else "None"
+context_str = context[:3000] if context else "No additional context"
- return f"""为机构/群体实体生成详细的社交媒体账号设定,最大程度还原已有现实情况。
+ return f"""Generate a detailed social media account profile for this organization or group entity, faithfully reflecting known real-world information.
-实体名称: {entity_name}
-实体类型: {entity_type}
-实体摘要: {entity_summary}
-实体属性: {attrs_str}
+Entity name: {entity_name}
+Entity type: {entity_type}
+Entity summary: {entity_summary}
+Entity attributes: {attrs_str}
-上下文信息:
+Context:
{context_str}
-请生成JSON,包含以下字段:
+Return a JSON object with the following fields:
-1. bio: 官方账号简介,200字,专业得体
-2. persona: 详细账号设定描述(2000字的纯文本),需包含:
- - 机构基本信息(正式名称、机构性质、成立背景、主要职能)
- - 账号定位(账号类型、目标受众、核心功能)
- - 发言风格(语言特点、常用表达、禁忌话题)
- - 发布内容特点(内容类型、发布频率、活跃时间段)
- - 立场态度(对核心话题的官方立场、面对争议的处理方式)
- - 特殊说明(代表的群体画像、运营习惯)
- - 机构记忆(机构人设的重要部分,要介绍这个机构与事件的关联,以及这个机构在事件中的已有动作与反应)
-3. age: 固定填30(机构账号的虚拟年龄)
-4. gender: 固定填"other"(机构账号使用other表示非个人)
-5. mbti: MBTI类型,用于描述账号风格,如ISTJ代表严谨保守
-6. country: 国家(使用中文,如"中国")
-7. profession: 机构职能描述
-8. interested_topics: 关注领域数组
+1. bio: Official account bio, ~200 words, professional and authoritative in tone
+2. persona: Detailed account profile description (~2000 words, plain text), covering:
+ - Organization basics (full official name, nature of the organization, founding background, primary mandate)
+ - Account positioning (account type, target audience, core purpose)
+ - Communication style (language characteristics, typical expressions, topics avoided)
+ - Content patterns (content types, posting frequency, active time periods)
+ - Stance and policy (official position on key topics, how disputes are handled)
+ - Special notes (the demographic this account represents, operational habits)
+ - Organizational memory (how this entity is connected to the simulated event, any prior actions or statements it has made)
+3. age: Use 30 (placeholder for organizational accounts)
+4. gender: Use "other" (organizations are non-personal)
+5. mbti: MBTI type describing the account's communication style (e.g. ISTJ = methodical and conservative)
+6. country: Country name in English (e.g. "United States", "Iran", "Israel")
+7. profession: Description of the organization's function or role
+8. interested_topics: Array of topic domains this organization focuses on
-重要:
-- 所有字段值必须是字符串或数字,不允许null值
-- persona必须是一段连贯的文字描述,不要使用换行符
-- 使用中文(除了gender字段必须用英文"other")
+Requirements:
+- All field values must be strings or numbers — no null values, no unescaped newlines
+- persona must be a single coherent paragraph of flowing prose
+- Write everything in English
-- age必须是整数30,gender必须是字符串"other"
-- 机构账号发言要符合其身份定位"""
+- age must be the integer 30; gender must be the string "other"
+- The account's statements must be consistent with its institutional identity"""
diff --git a/backend/app/services/ontology_generator.py b/backend/app/services/ontology_generator.py
index 2d3e39bd8..dead1f59d 100644
--- a/backend/app/services/ontology_generator.py
+++ b/backend/app/services/ontology_generator.py
@@ -8,150 +8,145 @@
from ..utils.llm_client import LLMClient
-# 本体生成的系统提示词
-ONTOLOGY_SYSTEM_PROMPT = """你是一个专业的知识图谱本体设计专家。你的任务是分析给定的文本内容和模拟需求,设计适合**社交媒体舆论模拟**的实体类型和关系类型。
+# Ontology generation system prompt
+ONTOLOGY_SYSTEM_PROMPT = """You are an expert knowledge graph ontology designer. Your task is to analyze provided text and a simulation goal, then design entity types and relationship types suitable for a **social media stakeholder simulation**.
-**重要:你必须输出有效的JSON格式数据,不要输出任何其他内容。**
+**IMPORTANT: You must output valid JSON only. Do not output any other text.**
-## 核心任务背景
+## Core Task Background
-我们正在构建一个**社交媒体舆论模拟系统**。在这个系统中:
-- 每个实体都是一个可以在社交媒体上发声、互动、传播信息的"账号"或"主体"
-- 实体之间会相互影响、转发、评论、回应
-- 我们需要模拟舆论事件中各方的反应和信息传播路径
+We are building a **multi-agent social media simulation system** to predict real-world outcomes (e.g., Polymarket prediction questions, geopolitical events, public policy impacts). In this system:
+- Each entity is a stakeholder that can speak, interact, and spread information on social media
+- Entities influence each other through posts, replies, shares, and reactions
+- We simulate how different actors respond to an event, then synthesize a prediction
-因此,**实体必须是现实中真实存在的、可以在社媒上发声和互动的主体**:
+Therefore, **entities must be real-world actors that can voice opinions and interact on social media**:
-**可以是**:
-- 具体的个人(公众人物、当事人、意见领袖、专家学者、普通人)
-- 公司、企业(包括其官方账号)
-- 组织机构(大学、协会、NGO、工会等)
-- 政府部门、监管机构
-- 媒体机构(报纸、电视台、自媒体、网站)
-- 社交媒体平台本身
-- 特定群体代表(如校友会、粉丝团、维权群体等)
+**Valid entity types**:
+- Specific individuals (public figures, experts, analysts, journalists, ordinary citizens)
+- Companies and corporations (including their official accounts)
+- Organizations (universities, associations, NGOs, think tanks, lobbying groups)
+- Government bodies and regulatory agencies
+- Military and intelligence entities
+- Media outlets (newspapers, TV channels, online news, podcasts)
+- Market participants (traders, hedge funds, prediction market bettors)
+- Activist and advocacy groups
-**不可以是**:
-- 抽象概念(如"舆论"、"情绪"、"趋势")
-- 主题/话题(如"学术诚信"、"教育改革")
-- 观点/态度(如"支持方"、"反对方")
+**Invalid entity types**:
+- Abstract concepts (e.g., "public opinion", "market sentiment", "geopolitical tension")
+- Topics or themes (e.g., "nuclear proliferation", "economic sanctions")
+- Stances or attitudes (e.g., "hawks", "doves", "pro-war faction")
-## 输出格式
+## Output Format
-请输出JSON格式,包含以下结构:
+Output JSON with the following structure:
```json
{
"entity_types": [
{
- "name": "实体类型名称(英文,PascalCase)",
- "description": "简短描述(英文,不超过100字符)",
+ "name": "EntityTypeName (English, PascalCase)",
+ "description": "Short description (English, max 100 chars)",
"attributes": [
{
- "name": "属性名(英文,snake_case)",
+ "name": "attribute_name (English, snake_case)",
"type": "text",
- "description": "属性描述"
+ "description": "Attribute description"
}
],
- "examples": ["示例实体1", "示例实体2"]
+ "examples": ["Example entity 1", "Example entity 2"]
}
],
"edge_types": [
{
- "name": "关系类型名称(英文,UPPER_SNAKE_CASE)",
- "description": "简短描述(英文,不超过100字符)",
+ "name": "RELATIONSHIP_NAME (English, UPPER_SNAKE_CASE)",
+ "description": "Short description (English, max 100 chars)",
"source_targets": [
- {"source": "源实体类型", "target": "目标实体类型"}
+ {"source": "SourceEntityType", "target": "TargetEntityType"}
],
"attributes": []
}
],
- "analysis_summary": "对文本内容的简要分析说明(中文)"
+ "analysis_summary": "Brief English summary of the text and how it maps to the simulation goal"
}
```
-## 设计指南(极其重要!)
+## Design Guidelines (CRITICAL)
-### 1. 实体类型设计 - 必须严格遵守
+### 1. Entity Type Design — Strict Rules
-**数量要求:必须正好10个实体类型**
+**Count: exactly 10 entity types**
-**层次结构要求(必须同时包含具体类型和兜底类型)**:
+**Hierarchy requirement (must include both specific and fallback types)**:
-你的10个实体类型必须包含以下层次:
+Your 10 entity types must include:
-A. **兜底类型(必须包含,放在列表最后2个)**:
- - `Person`: 任何自然人个体的兜底类型。当一个人不属于其他更具体的人物类型时,归入此类。
- - `Organization`: 任何组织机构的兜底类型。当一个组织不属于其他更具体的组织类型时,归入此类。
+A. **Fallback types (required, placed last in the list)**:
+ - `Person`: Fallback for any individual not matching a more specific type.
+ - `Organization`: Fallback for any organization not matching a more specific type.
-B. **具体类型(8个,根据文本内容设计)**:
- - 针对文本中出现的主要角色,设计更具体的类型
- - 例如:如果文本涉及学术事件,可以有 `Student`, `Professor`, `University`
- - 例如:如果文本涉及商业事件,可以有 `Company`, `CEO`, `Employee`
+B. **Specific types (8 types, designed from the text content)**:
+ - Identify the key stakeholder categories from the documents
+ - For geopolitical events: `GovernmentOfficial`, `Military`, `Diplomat`, `ThinkTank`, `Journalist`
+ - For financial events: `Trader`, `Analyst`, `Regulator`, `Bank`, `HedgeFund`
+ - For public health events: `HealthOfficial`, `Doctor`, `Researcher`, `Hospital`
-**为什么需要兜底类型**:
-- 文本中会出现各种人物,如"中小学教师"、"路人甲"、"某位网友"
-- 如果没有专门的类型匹配,他们应该被归入 `Person`
-- 同理,小型组织、临时团体等应该归入 `Organization`
+**Why fallback types matter**:
+- Texts mention many peripheral actors (unnamed sources, anonymous commenters, minor officials)
+- If no specific type matches, they fall into `Person` or `Organization`
-**具体类型的设计原则**:
-- 从文本中识别出高频出现或关键的角色类型
-- 每个具体类型应该有明确的边界,避免重叠
-- description 必须清晰说明这个类型和兜底类型的区别
+**Specific type design principles**:
+- Identify the most frequent and impactful actor categories from the text
+- Each specific type must have a clear boundary — avoid overlap
+- description must clarify the distinction from the fallback types
-### 2. 关系类型设计
+### 2. Relationship Type Design
-- 数量:6-10个
-- 关系应该反映社媒互动中的真实联系
-- 确保关系的 source_targets 涵盖你定义的实体类型
+- Count: 6–10 relationship types
+- Should reflect real-world connections between stakeholders
+- Cover geopolitical relationships: alliances, sanctions, conflicts, negotiations
+- Ensure source_targets covers the entity types you define
-### 3. 属性设计
+### 3. Attribute Design
-- 每个实体类型1-3个关键属性
-- **注意**:属性名不能使用 `name`、`uuid`、`group_id`、`created_at`、`summary`(这些是系统保留字)
-- 推荐使用:`full_name`, `title`, `role`, `position`, `location`, `description` 等
+- 1–3 key attributes per entity type
+- **Reserved words — do NOT use as attribute names**: `name`, `uuid`, `group_id`, `created_at`, `summary`
+- Use instead: `full_name`, `title`, `role`, `position`, `country`, `description`
-## 实体类型参考
+## Entity Type Reference
-**个人类(具体)**:
-- Student: 学生
-- Professor: 教授/学者
-- Journalist: 记者
-- Celebrity: 明星/网红
-- Executive: 高管
-- Official: 政府官员
-- Lawyer: 律师
-- Doctor: 医生
+**Individual (specific)**:
+- GovernmentOfficial: Head of state, minister, senator, diplomat
+- Military: General, commander, defense official
+- Journalist: Reporter, editor, analyst covering the topic
+- Analyst: Policy analyst, market analyst, academic researcher
+- Trader: Market participant, hedge fund manager, prediction market bettor
-**个人类(兜底)**:
-- Person: 任何自然人(不属于上述具体类型时使用)
+**Individual (fallback)**:
+- Person: Any individual not fitting a more specific type
-**组织类(具体)**:
-- University: 高校
-- Company: 公司企业
-- GovernmentAgency: 政府机构
-- MediaOutlet: 媒体机构
-- Hospital: 医院
-- School: 中小学
-- NGO: 非政府组织
+**Organization (specific)**:
+- GovernmentAgency: Ministry, regulatory body, intelligence agency
+- MilitaryForce: Armed forces, paramilitary, defense organization
+- MediaOutlet: News organization, broadcast network, online publication
+- ThinkTank: Policy research institute, academic center
+- NGO: Non-governmental organization, advocacy group
-**组织类(兜底)**:
-- Organization: 任何组织机构(不属于上述具体类型时使用)
+**Organization (fallback)**:
+- Organization: Any organization not fitting a more specific type
-## 关系类型参考
+## Relationship Type Reference
-- WORKS_FOR: 工作于
-- STUDIES_AT: 就读于
-- AFFILIATED_WITH: 隶属于
-- REPRESENTS: 代表
-- REGULATES: 监管
-- REPORTS_ON: 报道
-- COMMENTS_ON: 评论
-- RESPONDS_TO: 回应
-- SUPPORTS: 支持
-- OPPOSES: 反对
-- COLLABORATES_WITH: 合作
-- COMPETES_WITH: 竞争
+- ALLIED_WITH: Two entities share a formal or informal alliance
+- OPPOSES: One entity publicly opposes or conflicts with another
+- NEGOTIATES_WITH: Entities are engaged in active negotiations
+- REPORTS_ON: Media entity covers/reports on another entity
+- ADVISES: One entity provides counsel or analysis to another
+- SANCTIONS: One entity imposes economic/political sanctions on another
+- COMMANDS: Military or political command relationship
+- AFFILIATED_WITH: General affiliation or membership
+- RESPONDS_TO: One entity publicly responds to another
+- COMPETES_WITH: Economic or geopolitical competition
"""
@@ -225,31 +220,31 @@ def _build_user_message(
combined_text = combined_text[:self.MAX_TEXT_LENGTH_FOR_LLM]
combined_text += f"\n\n...(原文共{original_length}字,已截取前{self.MAX_TEXT_LENGTH_FOR_LLM}字用于本体分析)..."
- message = f"""## 模拟需求
+ message = f"""## Simulation Goal
{simulation_requirement}
-## 文档内容
+## Source Documents
{combined_text}
"""
-
+
if additional_context:
message += f"""
-## 额外说明
+## Additional Context
{additional_context}
"""
-
+
message += """
-请根据以上内容,设计适合社会舆论模拟的实体类型和关系类型。
-
-**必须遵守的规则**:
-1. 必须正好输出10个实体类型
-2. 最后2个必须是兜底类型:Person(个人兜底)和 Organization(组织兜底)
-3. 前8个是根据文本内容设计的具体类型
-4. 所有实体类型必须是现实中可以发声的主体,不能是抽象概念
-5. 属性名不能使用 name、uuid、group_id 等保留字,用 full_name、org_name 等替代
+Based on the above, design entity types and relationship types for a multi-agent stakeholder simulation.
+
+**Rules you must follow**:
+1. Output exactly 10 entity types
+2. The last 2 must be the fallback types: Person (individual fallback) and Organization (org fallback)
+3. The first 8 are specific types derived from the text content
+4. All entity types must represent real-world actors that can voice opinions — no abstract concepts
+5. Attribute names must not use reserved words: name, uuid, group_id, created_at, summary — use full_name, org_name, etc. instead
"""
return message
diff --git a/backend/app/services/report_agent.py b/backend/app/services/report_agent.py
index 02ca5bdc2..6805606e0 100644
--- a/backend/app/services/report_agent.py
+++ b/backend/app/services/report_agent.py
@@ -549,298 +549,326 @@ def to_dict(self) -> Dict[str, Any]:
# ── 大纲规划 prompt ──
PLAN_SYSTEM_PROMPT = """\
-你是一个「未来预测报告」的撰写专家,拥有对模拟世界的「上帝视角」——你可以洞察模拟中每一位Agent的行为、言论和互动。
-
-【核心理念】
-我们构建了一个模拟世界,并向其中注入了特定的「模拟需求」作为变量。模拟世界的演化结果,就是对未来可能发生情况的预测。你正在观察的不是"实验数据",而是"未来的预演"。
-
-【你的任务】
-撰写一份「未来预测报告」,回答:
-1. 在我们设定的条件下,未来发生了什么?
-2. 各类Agent(人群)是如何反应和行动?
-3. 这个模拟揭示了哪些值得关注的未来趋势和风险?
-
-【报告定位】
-- ✅ 这是一份基于模拟的未来预测报告,揭示"如果这样,未来会怎样"
-- ✅ 聚焦于预测结果:事件走向、群体反应、涌现现象、潜在风险
-- ✅ 模拟世界中的Agent言行就是对未来人群行为的预测
-- ❌ 不是对现实世界现状的分析
-- ❌ 不是泛泛而谈的舆情综述
-
-【章节数量限制】
-- 最少2个章节,最多5个章节
-- 不需要子章节,每个章节直接撰写完整内容
-- 内容要精炼,聚焦于核心预测发现
-- 章节结构由你根据预测结果自主设计
-
-请输出JSON格式的报告大纲,格式如下:
+You are an expert prediction report writer with a "god's-eye view" of the simulated world — you can observe every agent's behavior, statements, and interactions.
+
+[Core Concept]
+We built a simulated world and injected a specific "simulation goal" as the variable. The way the simulated world evolves IS the prediction of what may happen in reality. You are not analyzing "experimental data" — you are reading "a rehearsal of the future."
+
+[Your Task]
+Write a "Future Prediction Report" that answers:
+1. Under the conditions we set, what happened in the future?
+2. How did the different stakeholder groups (agents) react and act?
+3. What future trends, risks, or opportunities does this simulation reveal?
+4. What is the calibrated probability of the key outcome, grounded in simulation evidence AND historical base rates?
+
+[Report Positioning]
+- ✅ This is a simulation-based prediction report — it reveals "if this condition holds, what happens next"
+- ✅ Focus on predicted outcomes: event trajectory, group reactions, emergent phenomena, risks
+- ✅ Agent behavior in the simulation IS the prediction of future human behavior
+- ✅ Probability must be calibrated: anchor to historical base rates, then adjust based on simulation signals
+- ❌ Do NOT analyze the current state of the real world
+- ❌ Do NOT write a generic opinion survey
+
+[Probability Calibration Rules — Critical for Accuracy]
+When estimating probability, follow this three-step process:
+1. **Base rate**: Start with the historical frequency of this type of event (e.g. "US has conducted direct military strikes on a sovereign state ~3 times in 30 years = ~10% per year base rate")
+2. **Simulation signal**: Adjust based on what the simulation revealed — did agents escalate or de-escalate? What was the dominant narrative? Were red lines crossed?
+3. **Market anchor** (if provided in the simulation goal): Note the current prediction market price and explain whether the simulation confirms, contradicts, or is consistent with it
+
+The final probability must reflect all three inputs, not just LLM intuition.
+
+[Section Count]
+- Minimum 2 sections, maximum 5 sections
+- No sub-sections — each section is a complete standalone piece of content
+- Keep content focused on core prediction findings
+- Design the section structure based on what the simulation revealed
+- The FINAL section must always be titled "Prediction Verdict" and contain the full probability assessment
+
+Output a JSON report outline in the following format:
{
- "title": "报告标题",
- "summary": "报告摘要(一句话概括核心预测发现)",
+ "title": "Report title",
+ "summary": "One-sentence summary of the core prediction finding",
+ "predicted_probability": <0-100 integer, point estimate of probability the event occurs>,
+ "probability_low": <0-100 integer, lower bound of 80% confidence interval>,
+ "probability_high": <0-100 integer, upper bound of 80% confidence interval>,
+ "key_upside_factors": ["factor driving probability higher", ...],
+ "key_downside_factors": ["factor driving probability lower", ...],
"sections": [
{
- "title": "章节标题",
- "description": "章节内容描述"
+ "title": "Section title",
+ "description": "Section content description"
}
]
}
-注意:sections数组最少2个,最多5个元素!"""
+Note: sections array must have minimum 2, maximum 5 elements. The last section must be "Prediction Verdict".
+probability_low must be <= predicted_probability <= probability_high."""
PLAN_USER_PROMPT_TEMPLATE = """\
-【预测场景设定】
-我们向模拟世界注入的变量(模拟需求):{simulation_requirement}
+[Prediction Scenario]
+Simulation goal injected into the simulated world: {simulation_requirement}
-【模拟世界规模】
-- 参与模拟的实体数量: {total_nodes}
-- 实体间产生的关系数量: {total_edges}
-- 实体类型分布: {entity_types}
-- 活跃Agent数量: {total_entities}
+[Simulated World Scale]
+- Number of entities participating: {total_nodes}
+- Number of relationships generated: {total_edges}
+- Entity type distribution: {entity_types}
+- Active agents: {total_entities}
-【模拟预测到的部分未来事实样本】
+[Sample of Future Facts Observed in Simulation]
{related_facts_json}
-请以「上帝视角」审视这个未来预演:
-1. 在我们设定的条件下,未来呈现出了什么样的状态?
-2. 各类人群(Agent)是如何反应和行动的?
-3. 这个模拟揭示了哪些值得关注的未来趋势?
+From your god's-eye view, assess this rehearsal of the future:
+1. Under the conditions we set, what state did the future reach?
+2. How did the different stakeholder groups (agents) react and act?
+3. What future trends, risks, or opportunities does this simulation reveal?
+4. Following the three-step calibration process (base rate → simulation signal → market anchor), what is the probability range that the predicted event occurs?
-根据预测结果,设计最合适的报告章节结构。
+Design the most appropriate report section structure based on the prediction findings.
-【再次提醒】报告章节数量:最少2个,最多5个,内容要精炼聚焦于核心预测发现。"""
+Reminder: sections must be minimum 2, maximum 5. The last section must be "Prediction Verdict".
+Output probability_low, predicted_probability, and probability_high as separate integers (80% confidence interval)."""
# ── 章节生成 prompt ──
SECTION_SYSTEM_PROMPT_TEMPLATE = """\
-你是一个「未来预测报告」的撰写专家,正在撰写报告的一个章节。
+You are an expert prediction report writer, currently writing one section of a report.
-报告标题: {report_title}
-报告摘要: {report_summary}
-预测场景(模拟需求): {simulation_requirement}
+Report title: {report_title}
+Report summary: {report_summary}
+Prediction scenario (simulation goal): {simulation_requirement}
-当前要撰写的章节: {section_title}
+Current section to write: {section_title}
═══════════════════════════════════════════════════════════════
-【核心理念】
+[Core Concept]
═══════════════════════════════════════════════════════════════
-模拟世界是对未来的预演。我们向模拟世界注入了特定条件(模拟需求),
-模拟中Agent的行为和互动,就是对未来人群行为的预测。
+The simulated world is a rehearsal of the future. We injected specific conditions (the simulation goal),
+and the agents' behavior and interactions ARE the prediction of future human behavior.
-你的任务是:
-- 揭示在设定条件下,未来发生了什么
-- 预测各类人群(Agent)是如何反应和行动的
-- 发现值得关注的未来趋势、风险和机会
+Your task is to:
+- Reveal what happened in the future under the given conditions
+- Predict how different stakeholder groups (agents) reacted and acted
+- Identify future trends, risks, and opportunities worth noting
+- For the "Prediction Verdict" section: provide a clear probability estimate with supporting reasoning
-❌ 不要写成对现实世界现状的分析
-✅ 要聚焦于"未来会怎样"——模拟结果就是预测的未来
+❌ Do NOT write an analysis of the current real-world situation
+✅ Focus on "what will happen" — the simulation results ARE the predicted future
═══════════════════════════════════════════════════════════════
-【最重要的规则 - 必须遵守】
+[Most Important Rules — Must Be Followed]
═══════════════════════════════════════════════════════════════
-1. 【必须调用工具观察模拟世界】
- - 你正在以「上帝视角」观察未来的预演
- - 所有内容必须来自模拟世界中发生的事件和Agent言行
- - 禁止使用你自己的知识来编写报告内容
- - 每个章节至少调用3次工具(最多5次)来观察模拟的世界,它代表了未来
-
-2. 【必须引用Agent的原始言行】
- - Agent的发言和行为是对未来人群行为的预测
- - 在报告中使用引用格式展示这些预测,例如:
- > "某类人群会表示:原文内容..."
- - 这些引用是模拟预测的核心证据
-
-3. 【语言一致性 - 引用内容必须翻译为报告语言】
- - 工具返回的内容可能包含英文或中英文混杂的表述
- - 如果模拟需求和材料原文是中文的,报告必须全部使用中文撰写
- - 当你引用工具返回的英文或中英混杂内容时,必须将其翻译为流畅的中文后再写入报告
- - 翻译时保持原意不变,确保表述自然通顺
- - 这一规则同时适用于正文和引用块(> 格式)中的内容
-
-4. 【忠实呈现预测结果】
- - 报告内容必须反映模拟世界中的代表未来的模拟结果
- - 不要添加模拟中不存在的信息
- - 如果某方面信息不足,如实说明
+1. [Must use tools to observe the simulated world]
+ - You are observing the future rehearsal from a god's-eye view
+ - All content must come from events and agent statements in the simulated world
+ - Do NOT use your own knowledge to write report content
+ - Call tools at least 3 times per section (maximum 5 times)
+
+2. [Must quote agent statements and behaviors]
+ - Agent statements and actions are predictions of future human behavior
+ - Use block-quote format to present these predictions, e.g.:
+ > "A simulated analyst stated: [original quote]..."
+ - These quotes are the core evidence of the simulation prediction
+
+3. [Language: Write everything in English]
+ - The report must be written entirely in English
+ - If tool results contain non-English content, translate it into natural English before including it
+ - This applies to both body text and block quotes (> format)
+
+4. [Faithfully represent prediction results]
+ - Report content must reflect the simulation results as they are
+ - Do not add information not present in the simulation
+ - If information on a topic is insufficient, state so clearly
+
+5. [For "Prediction Verdict" section specifically — follow this exact structure]
+ a. **Base Rate Anchor**: State the historical base rate for this type of event ("Historically, X has occurred Y times in Z years, implying a base rate of ~N%")
+ b. **Simulation Signal**: Summarize what the simulation revealed — did agents broadly escalate or de-escalate? What emergent behavior was most telling?
+ c. **Market Comparison** (if a market price was given in the scenario): State whether the simulation confirms, contradicts, or is consistent with the market price, and why
+ d. **Probability Verdict**: State clearly — "**Predicted probability: X% (range: Y%–Z%)**"
+ e. **Key upside risks** (factors that would push the probability higher): bullet list
+ f. **Key downside risks** (factors that would push the probability lower): bullet list
+ g. **Confidence note**: briefly explain the main source of uncertainty in this estimate
═══════════════════════════════════════════════════════════════
-【⚠️ 格式规范 - 极其重要!】
+[⚠️ Format Rules — Critical]
═══════════════════════════════════════════════════════════════
-【一个章节 = 最小内容单位】
-- 每个章节是报告的最小分块单位
-- ❌ 禁止在章节内使用任何 Markdown 标题(#、##、###、#### 等)
-- ❌ 禁止在内容开头添加章节主标题
-- ✅ 章节标题由系统自动添加,你只需撰写纯正文内容
-- ✅ 使用**粗体**、段落分隔、引用、列表来组织内容,但不要用标题
+[One section = smallest content unit]
+- Each section is the smallest building block of the report
+- ❌ Do NOT use any Markdown headings (#, ##, ###, #### etc.) inside the section
+- ❌ Do NOT add the section title at the start of your content
+- ✅ The section title is added automatically by the system — just write the body content
+- ✅ Use **bold text**, paragraph breaks, block quotes, and lists to organize content
-【正确示例】
+[Correct example]
```
-本章节分析了事件的舆论传播态势。通过对模拟数据的深入分析,我们发现...
+The simulation revealed a sharp divergence between official government messaging and public reaction...
-**首发引爆阶段**
+**Escalation Phase**
-微博作为舆情的第一现场,承担了信息首发的核心功能:
+Military officials were among the most active agents in the early rounds:
-> "微博贡献了68%的首发声量..."
+> "A simulated Pentagon spokesperson posted: 'All options remain on the table...'"
-**情绪放大阶段**
+**Market Reaction Phase**
-抖音平台进一步放大了事件影响力:
+Traders responded immediately to the escalation signals:
-- 视觉冲击力强
-- 情绪共鸣度高
+- Prediction market odds shifted from 18% to 34% within 12 simulated hours
+- Energy futures spiked in agent posts
```
-【错误示例】
+[Incorrect example]
```
-## 执行摘要 ← 错误!不要添加任何标题
-### 一、首发阶段 ← 错误!不要用###分小节
-#### 1.1 详细分析 ← 错误!不要用####细分
+## Executive Summary ← Wrong! No headings
+### Phase 1: Escalation ← Wrong! No sub-headings
-本章节分析了...
+This section analyzes...
```
═══════════════════════════════════════════════════════════════
-【可用检索工具】(每章节调用3-5次)
+[Available Retrieval Tools] (call 3-5 times per section)
═══════════════════════════════════════════════════════════════
{tools_description}
-【工具使用建议 - 请混合使用不同工具,不要只用一种】
-- insight_forge: 深度洞察分析,自动分解问题并多维度检索事实和关系
-- panorama_search: 广角全景搜索,了解事件全貌、时间线和演变过程
-- quick_search: 快速验证某个具体信息点
-- interview_agents: 采访模拟Agent,获取不同角色的第一人称观点和真实反应
+[Tool usage guidance — mix different tools, don't rely on just one]
+- insight_forge: Deep insight analysis, auto-decomposes the question and retrieves facts from multiple angles
+- panorama_search: Wide-angle panoramic search — understand the full event, timeline, and evolution
+- quick_search: Quickly verify a specific data point
+- interview_agents: Interview simulated agents to get first-person perspectives from different stakeholder roles
═══════════════════════════════════════════════════════════════
-【工作流程】
+[Workflow]
═══════════════════════════════════════════════════════════════
-每次回复你只能做以下两件事之一(不可同时做):
+Each reply you can only do ONE of the following (not both):
-选项A - 调用工具:
-输出你的思考,然后用以下格式调用一个工具:
+Option A — Call a tool:
+Output your thinking, then call one tool in this format:
-{{"name": "工具名称", "parameters": {{"参数名": "参数值"}}}}
+{{"name": "tool_name", "parameters": {{"param_name": "param_value"}}}}
-系统会执行工具并把结果返回给你。你不需要也不能自己编写工具返回结果。
+The system will execute the tool and inject the result back. Do not fabricate tool results yourself.
-选项B - 输出最终内容:
-当你已通过工具获取了足够信息,以 "Final Answer:" 开头输出章节内容。
+Option B — Output final content:
+When you have gathered enough information, begin your output with "Final Answer:" followed by the section content.
-⚠️ 严格禁止:
-- 禁止在一次回复中同时包含工具调用和 Final Answer
-- 禁止自己编造工具返回结果(Observation),所有工具结果由系统注入
-- 每次回复最多调用一个工具
+⚠️ Strictly prohibited:
+- Do NOT include both a tool call and Final Answer in the same reply
+- Do NOT fabricate tool results (Observations) — all tool results are injected by the system
+- Maximum one tool call per reply
═══════════════════════════════════════════════════════════════
-【章节内容要求】
+[Section Content Requirements]
═══════════════════════════════════════════════════════════════
-1. 内容必须基于工具检索到的模拟数据
-2. 大量引用原文来展示模拟效果
-3. 使用Markdown格式(但禁止使用标题):
- - 使用 **粗体文字** 标记重点(代替子标题)
- - 使用列表(-或1.2.3.)组织要点
- - 使用空行分隔不同段落
- - ❌ 禁止使用 #、##、###、#### 等任何标题语法
-4. 【引用格式规范 - 必须单独成段】
- 引用必须独立成段,前后各有一个空行,不能混在段落中:
-
- ✅ 正确格式:
+1. Content must be grounded in simulation data retrieved via tools
+2. Quote agent statements liberally to demonstrate simulation behavior
+3. Use Markdown formatting (but no headings):
+ - Use **bold text** for emphasis (instead of sub-headings)
+ - Use lists (- or 1. 2. 3.) to organize points
+ - Use blank lines to separate paragraphs
+ - ❌ No #, ##, ###, #### heading syntax
+4. [Block quote format — must be standalone paragraphs]
+ Quotes must be on their own paragraph, with a blank line before and after:
+
+ ✅ Correct:
```
- 校方的回应被认为缺乏实质内容。
+ The government response was widely seen as inadequate.
- > "校方的应对模式在瞬息万变的社交媒体环境中显得僵化和迟缓。"
+ > "A simulated State Department official posted: 'We are monitoring the situation closely.'"
- 这一评价反映了公众的普遍不满。
+ This cautious tone was interpreted by other agents as a signal of hesitation.
```
- ❌ 错误格式:
+ ❌ Incorrect:
```
- 校方的回应被认为缺乏实质内容。> "校方的应对模式..." 这一评价反映了...
+ The response was inadequate. > "A simulated official posted..." This was interpreted as...
```
-5. 保持与其他章节的逻辑连贯性
-6. 【避免重复】仔细阅读下方已完成的章节内容,不要重复描述相同的信息
-7. 【再次强调】不要添加任何标题!用**粗体**代替小节标题"""
+5. Maintain logical continuity with other sections
+6. [Avoid repetition] Read the completed sections below carefully — do not repeat the same information
+7. [Reminder] No headings! Use **bold** instead of sub-section titles"""
SECTION_USER_PROMPT_TEMPLATE = """\
-已完成的章节内容(请仔细阅读,避免重复):
+Completed section content so far (read carefully to avoid repetition):
{previous_content}
═══════════════════════════════════════════════════════════════
-【当前任务】撰写章节: {section_title}
+[Current Task] Write section: {section_title}
═══════════════════════════════════════════════════════════════
-【重要提醒】
-1. 仔细阅读上方已完成的章节,避免重复相同的内容!
-2. 开始前必须先调用工具获取模拟数据
-3. 请混合使用不同工具,不要只用一种
-4. 报告内容必须来自检索结果,不要使用自己的知识
+[Important reminders]
+1. Read the completed sections above carefully — do not repeat the same content!
+2. You must call tools first before writing the section content
+3. Mix different tools — don't rely on just one
+4. Report content must come from retrieval results — do not use your own knowledge
-【⚠️ 格式警告 - 必须遵守】
-- ❌ 不要写任何标题(#、##、###、####都不行)
-- ❌ 不要写"{section_title}"作为开头
-- ✅ 章节标题由系统自动添加
-- ✅ 直接写正文,用**粗体**代替小节标题
+[⚠️ Format warning — must follow]
+- ❌ No headings of any kind (#, ##, ###, ####)
+- ❌ Do not write "{section_title}" as the opening line
+- ✅ The section title is added automatically by the system
+- ✅ Start directly with body content — use **bold** instead of sub-headings
+- ✅ Write in English throughout
-请开始:
-1. 首先思考(Thought)这个章节需要什么信息
-2. 然后调用工具(Action)获取模拟数据
-3. 收集足够信息后输出 Final Answer(纯正文,无任何标题)"""
+Begin:
+1. First think (Thought) about what information this section needs
+2. Then call a tool (Action) to retrieve simulation data
+3. After gathering enough information, output Final Answer (plain body text, no headings)"""
# ── ReACT 循环内消息模板 ──
REACT_OBSERVATION_TEMPLATE = """\
-Observation(检索结果):
+Observation (retrieval result):
-═══ 工具 {tool_name} 返回 ═══
+═══ Tool {tool_name} returned ═══
{result}
═══════════════════════════════════════════════════════════════
-已调用工具 {tool_calls_count}/{max_tool_calls} 次(已用: {used_tools_str}){unused_hint}
-- 如果信息充分:以 "Final Answer:" 开头输出章节内容(必须引用上述原文)
-- 如果需要更多信息:调用一个工具继续检索
+Tools called: {tool_calls_count}/{max_tool_calls} (used: {used_tools_str}){unused_hint}
+- If you have enough information: begin your output with "Final Answer:" (must quote the above results)
+- If you need more information: call one more tool to continue retrieving
═══════════════════════════════════════════════════════════════"""
REACT_INSUFFICIENT_TOOLS_MSG = (
- "【注意】你只调用了{tool_calls_count}次工具,至少需要{min_tool_calls}次。"
- "请再调用工具获取更多模拟数据,然后再输出 Final Answer。{unused_hint}"
+ "[Notice] You have only called {tool_calls_count} tool(s), but at least {min_tool_calls} are required. "
+ "Please call more tools to gather additional simulation data before outputting a Final Answer. {unused_hint}"
)
REACT_INSUFFICIENT_TOOLS_MSG_ALT = (
- "当前只调用了 {tool_calls_count} 次工具,至少需要 {min_tool_calls} 次。"
- "请调用工具获取模拟数据。{unused_hint}"
+ "Only {tool_calls_count} tool call(s) so far — at least {min_tool_calls} required. "
+ "Please call a tool to retrieve simulation data. {unused_hint}"
)
REACT_TOOL_LIMIT_MSG = (
- "工具调用次数已达上限({tool_calls_count}/{max_tool_calls}),不能再调用工具。"
- '请立即基于已获取的信息,以 "Final Answer:" 开头输出章节内容。'
+ "Tool call limit reached ({tool_calls_count}/{max_tool_calls}). No more tool calls allowed. "
+ 'Please immediately output your section content starting with "Final Answer:" based on the information gathered so far.'
)
-REACT_UNUSED_TOOLS_HINT = "\n💡 你还没有使用过: {unused_list},建议尝试不同工具获取多角度信息"
+REACT_UNUSED_TOOLS_HINT = "\n💡 You haven't used yet: {unused_list} — consider trying different tools for multi-angle insights"
-REACT_FORCE_FINAL_MSG = "已达到工具调用限制,请直接输出 Final Answer: 并生成章节内容。"
+REACT_FORCE_FINAL_MSG = "Tool call limit reached. Output Final Answer: now and generate the section content."
# ── Chat prompt ──
CHAT_SYSTEM_PROMPT_TEMPLATE = """\
-你是一个简洁高效的模拟预测助手。
+You are a concise and efficient simulation prediction assistant.
-【背景】
-预测条件: {simulation_requirement}
+[Background]
+Prediction scenario: {simulation_requirement}
-【已生成的分析报告】
+[Generated Analysis Report]
{report_content}
-【规则】
-1. 优先基于上述报告内容回答问题
-2. 直接回答问题,避免冗长的思考论述
-3. 仅在报告内容不足以回答时,才调用工具检索更多数据
-4. 回答要简洁、清晰、有条理
+[Rules]
+1. Prioritize answering questions based on the report above
+2. Answer directly — avoid lengthy preambles or reasoning narration
+3. Only call tools when the report content is insufficient to answer the question
+4. Keep answers concise, clear, and well-structured
+5. Always respond in English
-【可用工具】(仅在需要时使用,最多调用1-2次)
+[Available tools] (use only when needed, maximum 1-2 calls)
{tools_description}
【工具调用格式】
diff --git a/backend/app/services/simulation_config_generator.py b/backend/app/services/simulation_config_generator.py
index cc362508b..463fd000f 100644
--- a/backend/app/services/simulation_config_generator.py
+++ b/backend/app/services/simulation_config_generator.py
@@ -539,52 +539,48 @@ def _generate_time_config(self, context: str, num_entities: int) -> Dict[str, An
# 计算最大允许值(80%的agent数)
max_agents_allowed = max(1, int(num_entities * 0.9))
- prompt = f"""基于以下模拟需求,生成时间模拟配置。
+ prompt = f"""Based on the following simulation goal, generate a time simulation configuration.
{context_truncated}
-## 任务
-请生成时间配置JSON。
+## Task
+Generate a time configuration JSON.
-### 基本原则(仅供参考,需根据具体事件和参与群体灵活调整):
-- 用户群体为中国人,需符合北京时间作息习惯
-- 凌晨0-5点几乎无人活动(活跃度系数0.05)
-- 早上6-8点逐渐活跃(活跃度系数0.4)
-- 工作时间9-18点中等活跃(活跃度系数0.7)
-- 晚间19-22点是高峰期(活跃度系数1.5)
-- 23点后活跃度下降(活跃度系数0.5)
-- 一般规律:凌晨低活跃、早间渐增、工作时段中等、晚间高峰
-- **重要**:以下示例值仅供参考,你需要根据事件性质、参与群体特点来调整具体时段
- - 例如:学生群体高峰可能是21-23点;媒体全天活跃;官方机构只在工作时间
- - 例如:突发热点可能导致深夜也有讨论,off_peak_hours 可适当缩短
+### General principles (adapt flexibly based on the event and stakeholder group):
+- Global news cycle: activity spikes when major developments break, regardless of time zone
+- Journalists and analysts: active during business hours (9–18 local) with spikes at breaking news
+- Government officials: primarily active during business hours (9–17), slower response cadence
+- Social media users and citizens: peak activity in evenings (19–23 local), low activity midnight–6am
+- Prediction market traders: often most active during US Eastern business hours (13–21 UTC)
+- **Adapt** peak/off-peak hours to the specific stakeholder types in this simulation
-### 返回JSON格式(不要markdown)
+### Return JSON format (no markdown)
-示例:
+Example:
{{
"total_simulation_hours": 72,
"minutes_per_round": 60,
"agents_per_hour_min": 5,
"agents_per_hour_max": 50,
- "peak_hours": [19, 20, 21, 22],
+ "peak_hours": [14, 15, 16, 17, 18, 19, 20, 21],
"off_peak_hours": [0, 1, 2, 3, 4, 5],
"morning_hours": [6, 7, 8],
- "work_hours": [9, 10, 11, 12, 13, 14, 15, 16, 17, 18],
- "reasoning": "针对该事件的时间配置说明"
+ "work_hours": [9, 10, 11, 12, 13, 14, 15, 16, 17],
+ "reasoning": "Brief explanation of why this configuration fits the event"
}}
-字段说明:
-- total_simulation_hours (int): 模拟总时长,24-168小时,突发事件短、持续话题长
-- minutes_per_round (int): 每轮时长,30-120分钟,建议60分钟
-- agents_per_hour_min (int): 每小时最少激活Agent数(取值范围: 1-{max_agents_allowed})
-- agents_per_hour_max (int): 每小时最多激活Agent数(取值范围: 1-{max_agents_allowed})
-- peak_hours (int数组): 高峰时段,根据事件参与群体调整
-- off_peak_hours (int数组): 低谷时段,通常深夜凌晨
-- morning_hours (int数组): 早间时段
-- work_hours (int数组): 工作时段
-- reasoning (string): 简要说明为什么这样配置"""
+Field descriptions:
+- total_simulation_hours (int): Total simulation duration, 24–168 hours; shorter for breaking events, longer for slow-burn topics
+- minutes_per_round (int): Duration per round, 30–120 minutes; recommend 60 minutes
+- agents_per_hour_min (int): Minimum agents activated per hour (range: 1–{max_agents_allowed})
+- agents_per_hour_max (int): Maximum agents activated per hour (range: 1–{max_agents_allowed})
+- peak_hours (int array): High-activity hours, adapted to the stakeholder group's timezone patterns
+- off_peak_hours (int array): Low-activity hours, typically late night/early morning
+- morning_hours (int array): Morning hours
+- work_hours (int array): Standard business/work hours
+- reasoning (string): Brief explanation of this configuration"""
- system_prompt = "你是社交媒体模拟专家。返回纯JSON格式,时间配置需符合中国人作息习惯。"
+ system_prompt = "You are a social media simulation expert. Return pure JSON only. Calibrate activity patterns to the specific stakeholders and geographies involved in this simulation."
try:
return self._call_llm_with_retry(prompt, system_prompt)
@@ -593,17 +589,17 @@ def _generate_time_config(self, context: str, num_entities: int) -> Dict[str, An
return self._get_default_time_config(num_entities)
def _get_default_time_config(self, num_entities: int) -> Dict[str, Any]:
- """获取默认时间配置(中国人作息)"""
+ """Default time config based on global news cycle patterns"""
return {
"total_simulation_hours": 72,
- "minutes_per_round": 60, # 每轮1小时,加快时间流速
+ "minutes_per_round": 60,
"agents_per_hour_min": max(1, num_entities // 15),
"agents_per_hour_max": max(5, num_entities // 5),
- "peak_hours": [19, 20, 21, 22],
+ "peak_hours": [14, 15, 16, 17, 18, 19, 20, 21],
"off_peak_hours": [0, 1, 2, 3, 4, 5],
"morning_hours": [6, 7, 8],
- "work_hours": [9, 10, 11, 12, 13, 14, 15, 16, 17, 18],
- "reasoning": "使用默认中国人作息配置(每轮1小时)"
+ "work_hours": [9, 10, 11, 12, 13, 14, 15, 16, 17],
+ "reasoning": "Default global news cycle config (1 hour per round)"
}
def _parse_time_config(self, result: Dict[str, Any], num_entities: int) -> TimeSimulationConfig:
@@ -671,36 +667,36 @@ def _generate_event_config(
# 使用配置的上下文截断长度
context_truncated = context[:self.EVENT_CONFIG_CONTEXT_LENGTH]
- prompt = f"""基于以下模拟需求,生成事件配置。
+ prompt = f"""Based on the following simulation goal, generate the event seed configuration.
-模拟需求: {simulation_requirement}
+Simulation goal: {simulation_requirement}
{context_truncated}
-## 可用实体类型及示例
+## Available entity types and examples
{type_info}
-## 任务
-请生成事件配置JSON:
-- 提取热点话题关键词
-- 描述舆论发展方向
-- 设计初始帖子内容,**每个帖子必须指定 poster_type(发布者类型)**
+## Task
+Generate an event configuration JSON:
+- Extract key topic keywords/phrases
+- Describe the predicted narrative direction (how discourse might unfold)
+- Design initial seed posts that kick off the simulation — **each post must specify a poster_type**
-**重要**: poster_type 必须从上面的"可用实体类型"中选择,这样初始帖子才能分配给合适的 Agent 发布。
-例如:官方声明应由 Official/University 类型发布,新闻由 MediaOutlet 发布,学生观点由 Student 发布。
+**Important**: poster_type must exactly match one of the available entity types listed above so that posts can be assigned to the right agents.
+Example: official statements from GovernmentAgency/Military, news from MediaOutlet, analyst views from Analyst/ThinkTank.
-返回JSON格式(不要markdown):
+Return JSON format (no markdown):
{{
- "hot_topics": ["关键词1", "关键词2", ...],
- "narrative_direction": "<舆论发展方向描述>",
+ "hot_topics": ["keyword1", "keyword2", ...],
+    "narrative_direction": "<description of the predicted narrative direction>",
"initial_posts": [
- {{"content": "帖子内容", "poster_type": "实体类型(必须从可用类型中选择)"}},
+ {{"content": "Post content in English", "poster_type": "EntityType (must match available types)"}},
...
],
- "reasoning": "<简要说明>"
+    "reasoning": "<brief explanation>"
}}"""
- system_prompt = "你是舆论分析专家。返回纯JSON格式。注意 poster_type 必须精确匹配可用实体类型。"
+ system_prompt = "You are an expert geopolitical narrative analyst. Return pure JSON only. Ensure poster_type exactly matches one of the available entity types."
try:
return self._call_llm_with_retry(prompt, system_prompt)
@@ -827,43 +823,44 @@ def _generate_agent_configs_batch(
"summary": e.summary[:summary_len] if e.summary else ""
})
- prompt = f"""基于以下信息,为每个实体生成社交媒体活动配置。
+ prompt = f"""Based on the following information, generate social media activity configurations for each entity.
-模拟需求: {simulation_requirement}
+Simulation goal: {simulation_requirement}
-## 实体列表
+## Entity list
```json
{json.dumps(entity_list, ensure_ascii=False, indent=2)}
```
-## 任务
-为每个实体生成活动配置,注意:
-- **时间符合中国人作息**:凌晨0-5点几乎不活动,晚间19-22点最活跃
-- **官方机构**(University/GovernmentAgency):活跃度低(0.1-0.3),工作时间(9-17)活动,响应慢(60-240分钟),影响力高(2.5-3.0)
-- **媒体**(MediaOutlet):活跃度中(0.4-0.6),全天活动(8-23),响应快(5-30分钟),影响力高(2.0-2.5)
-- **个人**(Student/Person/Alumni):活跃度高(0.6-0.9),主要晚间活动(18-23),响应快(1-15分钟),影响力低(0.8-1.2)
-- **公众人物/专家**:活跃度中(0.4-0.6),影响力中高(1.5-2.0)
+## Task
+Generate an activity configuration for each entity. Key guidelines:
+- **Government/Military officials** (GovernmentAgency/Military/GovernmentOfficial): low activity (0.1-0.3), business hours only (9-17), slow response (60-240 min), high influence (2.5-3.0)
+- **Media outlets** (MediaOutlet/Journalist): medium activity (0.4-0.6), active all day (8-23), fast response (5-30 min), high influence (2.0-2.5)
+- **Analysts/Think tanks** (Analyst/ThinkTank): medium activity (0.3-0.5), active business hours, moderate response (15-60 min), medium-high influence (1.5-2.0)
+- **Traders/Market participants** (Trader): high activity during market hours (0.5-0.8), very fast response (1-10 min), influence depends on following
+- **General public/Individuals** (Person/Activist): high activity (0.6-0.9), peaks in evenings (18-23), fast response (1-15 min), lower influence (0.8-1.2)
+- Calibrate active_hours to the stakeholder's real-world timezone and work schedule
-返回JSON格式(不要markdown):
+Return JSON format (no markdown):
{{
"agent_configs": [
{{
- "agent_id": <必须与输入一致>,
+            "agent_id": <must match the input agent_id>,
"activity_level": <0.0-1.0>,
- "posts_per_hour": <发帖频率>,
- "comments_per_hour": <评论频率>,
- "active_hours": [<活跃小时列表,考虑中国人作息>],
- "response_delay_min": <最小响应延迟分钟>,
- "response_delay_max": <最大响应延迟分钟>,
- "sentiment_bias": <-1.0到1.0>,
+            "posts_per_hour": <posting frequency>,
+            "comments_per_hour": <commenting frequency>,
+            "active_hours": [<list of active hours, matching the entity's schedule>],
+            "response_delay_min": <minimum response delay in minutes>,
+            "response_delay_max": <maximum response delay in minutes>,
+ "sentiment_bias": <-1.0 to 1.0>,
"stance": "",
- "influence_weight": <影响力权重>
+            "influence_weight": <influence weight>
}},
...
]
}}"""
- system_prompt = "你是社交媒体行为分析专家。返回纯JSON,配置需符合中国人作息习惯。"
+ system_prompt = "You are an expert social media behavior analyst. Return pure JSON only. Calibrate activity patterns to each entity's real-world role, timezone, and stakeholder type."
try:
result = self._call_llm_with_retry(prompt, system_prompt)
diff --git a/backend/app/services/zep_tools.py b/backend/app/services/zep_tools.py
index 384cf540f..b88c8f73b 100644
--- a/backend/app/services/zep_tools.py
+++ b/backend/app/services/zep_tools.py
@@ -1101,23 +1101,23 @@ def _generate_sub_queries(
将复杂问题分解为多个可以独立检索的子问题
"""
- system_prompt = """你是一个专业的问题分析专家。你的任务是将一个复杂问题分解为多个可以在模拟世界中独立观察的子问题。
+ system_prompt = """You are an expert question analyst. Your task is to decompose a complex question into multiple sub-questions that can each be independently observed in the simulated world.
-要求:
-1. 每个子问题应该足够具体,可以在模拟世界中找到相关的Agent行为或事件
-2. 子问题应该覆盖原问题的不同维度(如:谁、什么、为什么、怎么样、何时、何地)
-3. 子问题应该与模拟场景相关
-4. 返回JSON格式:{"sub_queries": ["子问题1", "子问题2", ...]}"""
+Requirements:
+1. Each sub-question must be specific enough to find relevant agent behavior or events in the simulation
+2. Sub-questions should cover different dimensions of the original question (who, what, why, how, when, where)
+3. Sub-questions must be relevant to the simulation scenario
+4. Return JSON format: {"sub_queries": ["sub-question 1", "sub-question 2", ...]}"""
- user_prompt = f"""模拟需求背景:
+ user_prompt = f"""Simulation background:
{simulation_requirement}
-{f"报告上下文:{report_context[:500]}" if report_context else ""}
+{f"Report context: {report_context[:500]}" if report_context else ""}
-请将以下问题分解为{max_queries}个子问题:
+Decompose the following question into {max_queries} sub-questions:
{query}
-返回JSON格式的子问题列表。"""
+Return a JSON list of sub-questions."""
try:
response = self.llm.chat_json(
@@ -1133,13 +1133,12 @@ def _generate_sub_queries(
return [str(sq) for sq in sub_queries[:max_queries]]
except Exception as e:
- logger.warning(f"生成子问题失败: {str(e)},使用默认子问题")
- # 降级:返回基于原问题的变体
+ logger.warning(f"Failed to generate sub-queries: {str(e)}, using default fallback")
return [
query,
- f"{query} 的主要参与者",
- f"{query} 的原因和影响",
- f"{query} 的发展过程"
+ f"Key actors involved in: {query}",
+ f"Causes and effects of: {query}",
+ f"How the situation developed: {query}"
][:max_queries]
def panorama_search(
@@ -1577,30 +1576,30 @@ def _select_agents_for_interview(
}
agent_summaries.append(summary)
- system_prompt = """你是一个专业的采访策划专家。你的任务是根据采访需求,从模拟Agent列表中选择最适合采访的对象。
+ system_prompt = """You are an expert interview coordinator. Your task is to select the most suitable agents to interview from the simulation, based on the interview requirement.
-选择标准:
-1. Agent的身份/职业与采访主题相关
-2. Agent可能持有独特或有价值的观点
-3. 选择多样化的视角(如:支持方、反对方、中立方、专业人士等)
-4. 优先选择与事件直接相关的角色
+Selection criteria:
+1. The agent's identity/profession is relevant to the interview topic
+2. The agent is likely to hold unique or valuable perspectives
+3. Select a diverse range of viewpoints (e.g. pro, against, neutral, expert, affected party)
+4. Prioritize agents directly involved in or affected by the event
-返回JSON格式:
+Return JSON format:
{
- "selected_indices": [选中Agent的索引列表],
- "reasoning": "选择理由说明"
+ "selected_indices": [list of selected agent indices],
+ "reasoning": "Explanation of why these agents were selected"
}"""
- user_prompt = f"""采访需求:
+ user_prompt = f"""Interview requirement:
{interview_requirement}
-模拟背景:
-{simulation_requirement if simulation_requirement else "未提供"}
+Simulation background:
+{simulation_requirement if simulation_requirement else "Not provided"}
-可选择的Agent列表(共{len(agent_summaries)}个):
+Available agents (total: {len(agent_summaries)}):
{json.dumps(agent_summaries, ensure_ascii=False, indent=2)}
-请选择最多{max_agents}个最适合采访的Agent,并说明选择理由。"""
+Select up to {max_agents} agents most suitable for this interview and explain your reasoning."""
try:
response = self.llm.chat_json(
@@ -1629,7 +1628,7 @@ def _select_agents_for_interview(
# 降级:选择前N个
selected = profiles[:max_agents]
indices = list(range(min(max_agents, len(profiles))))
- return selected, indices, "使用默认选择策略"
+ return selected, indices, "Using default selection strategy"
def _generate_interview_questions(
self,
@@ -1641,25 +1640,25 @@ def _generate_interview_questions(
agent_roles = [a.get("profession", "未知") for a in selected_agents]
- system_prompt = """你是一个专业的记者/采访者。根据采访需求,生成3-5个深度采访问题。
+ system_prompt = """You are a professional journalist/interviewer. Based on the interview requirement, generate 3-5 in-depth interview questions.
-问题要求:
-1. 开放性问题,鼓励详细回答
-2. 针对不同角色可能有不同答案
-3. 涵盖事实、观点、感受等多个维度
-4. 语言自然,像真实采访一样
-5. 每个问题控制在50字以内,简洁明了
-6. 直接提问,不要包含背景说明或前缀
+Question requirements:
+1. Open-ended questions that encourage detailed answers
+2. Questions where different roles might give different answers
+3. Cover multiple dimensions: facts, opinions, feelings, predictions
+4. Natural language, like a real interview
+5. Keep each question concise (under 30 words)
+6. Ask directly — no background preambles or prefixes
-返回JSON格式:{"questions": ["问题1", "问题2", ...]}"""
+Return JSON format: {"questions": ["question 1", "question 2", ...]}"""
- user_prompt = f"""采访需求:{interview_requirement}
+ user_prompt = f"""Interview requirement: {interview_requirement}
-模拟背景:{simulation_requirement if simulation_requirement else "未提供"}
+Simulation background: {simulation_requirement if simulation_requirement else "Not provided"}
-采访对象角色:{', '.join(agent_roles)}
+Interviewee roles: {', '.join(agent_roles)}
-请生成3-5个采访问题。"""
+Generate 3-5 interview questions."""
try:
response = self.llm.chat_json(
@@ -1695,28 +1694,29 @@ def _generate_interview_summary(
for interview in interviews:
interview_texts.append(f"【{interview.agent_name}({interview.agent_role})】\n{interview.response[:500]}")
- system_prompt = """你是一个专业的新闻编辑。请根据多位受访者的回答,生成一份采访摘要。
+ system_prompt = """You are a professional news editor. Based on the responses from multiple interviewees, generate an interview summary.
-摘要要求:
-1. 提炼各方主要观点
-2. 指出观点的共识和分歧
-3. 突出有价值的引言
-4. 客观中立,不偏袒任何一方
-5. 控制在1000字内
+Summary requirements:
+1. Distill the key viewpoints from each stakeholder
+2. Identify points of consensus and disagreement
+3. Highlight valuable quotes
+4. Objective and neutral — do not favor any side
+5. Keep to under 1000 words
-格式约束(必须遵守):
-- 使用纯文本段落,用空行分隔不同部分
-- 不要使用Markdown标题(如#、##、###)
-- 不要使用分割线(如---、***)
-- 引用受访者原话时使用中文引号「」
-- 可以使用**加粗**标记关键词,但不要使用其他Markdown语法"""
+Format rules (must follow):
+- Use plain text paragraphs separated by blank lines
+- Do NOT use Markdown headings (#, ##, ###)
+- Do NOT use divider lines (---, ***)
+- When quoting interviewees, use double quotation marks: "quote"
+- You may use **bold** to mark key terms, but no other Markdown syntax
+- Write entirely in English"""
- user_prompt = f"""采访主题:{interview_requirement}
+ user_prompt = f"""Interview topic: {interview_requirement}
-采访内容:
+Interview content:
{"".join(interview_texts)}
-请生成采访摘要。"""
+Generate the interview summary."""
try:
summary = self.llm.chat(
@@ -1730,6 +1730,5 @@ def _generate_interview_summary(
return summary
except Exception as e:
- logger.warning(f"生成采访摘要失败: {e}")
- # 降级:简单拼接
- return f"共采访了{len(interviews)}位受访者,包括:" + "、".join([i.agent_name for i in interviews])
+ logger.warning(f"Failed to generate interview summary: {e}")
+ return f"Interviewed {len(interviews)} agents: " + ", ".join([i.agent_name for i in interviews])
diff --git a/backend/app/utils/file_parser.py b/backend/app/utils/file_parser.py
index 3f1d8ed2e..0312ac43c 100644
--- a/backend/app/utils/file_parser.py
+++ b/backend/app/utils/file_parser.py
@@ -4,9 +4,60 @@
"""
import os
+import imghdr  # NOTE(review): imghdr is deprecated and removed in Python 3.13 (PEP 594) — replace with explicit magic-number checks before upgrading
from pathlib import Path
from typing import List, Optional
+# PDF 文件魔数(%PDF)
+_PDF_MAGIC = b'%PDF'
+# 常见可执行文件/二进制魔数(拒绝上传)
+_BLOCKED_MAGIC_PREFIXES = (
+ b'MZ', # Windows PE 可执行
+ b'\x7fELF', # Linux ELF 可执行
+ b'\xca\xfe\xba\xbe', # Mach-O 可执行
+ b'PK\x03\x04', # ZIP / docx / jar
+)
+# 最大允许文件大小(50MB,与 Flask MAX_CONTENT_LENGTH 一致)
+_MAX_FILE_SIZE = 50 * 1024 * 1024
+
+
+def _validate_file(file_path: str, expected_suffix: str) -> None:
+ """
+ 根据文件内容验证上传文件的合法性,防止扩展名欺骗攻击。
+
+ Args:
+ file_path: 文件路径
+ expected_suffix: 期望的扩展名(如 '.pdf')
+
+ Raises:
+ OSError: 文件超过大小限制
+ ValueError: 文件内容与扩展名不符,或包含被拒绝的内容类型
+ """
+ path = Path(file_path)
+ size = path.stat().st_size
+ if size > _MAX_FILE_SIZE:
+ raise OSError(
+ f"文件超过最大限制 {_MAX_FILE_SIZE // (1024 * 1024)}MB: {size} bytes"
+ )
+
+ header = path.read_bytes()[:8]
+
+ # 拒绝已知的可执行/压缩格式
+ for magic in _BLOCKED_MAGIC_PREFIXES:
+ if header.startswith(magic):
+ raise ValueError(f"不允许上传的文件类型(可执行/压缩格式): {file_path}")
+
+ # 对 PDF 严格验证魔数
+ if expected_suffix == '.pdf':
+ if not header.startswith(_PDF_MAGIC):
+ raise ValueError(
+ f"文件内容与 .pdf 扩展名不符(缺少 %PDF 魔数): {file_path}"
+ )
+
+ # 拒绝伪装成文本的图片
+ if imghdr.what(file_path) is not None:
+ raise ValueError(f"不允许上传图片文件: {file_path}")
+
def _read_text_with_fallback(file_path: str) -> str:
"""
@@ -78,12 +129,15 @@ def extract_text(cls, file_path: str) -> str:
if not path.exists():
raise FileNotFoundError(f"文件不存在: {file_path}")
-
+
suffix = path.suffix.lower()
-
+
if suffix not in cls.SUPPORTED_EXTENSIONS:
raise ValueError(f"不支持的文件格式: {suffix}")
-
+
+ # 验证文件内容与扩展名一致,拒绝扩展名欺骗
+ _validate_file(file_path, suffix)
+
if suffix == '.pdf':
return cls._extract_from_pdf(file_path)
elif suffix in {'.md', '.markdown'}:
diff --git a/backend/app/utils/llm_client.py b/backend/app/utils/llm_client.py
index 6c1a81f49..f5167b0b5 100644
--- a/backend/app/utils/llm_client.py
+++ b/backend/app/utils/llm_client.py
@@ -5,33 +5,44 @@
import json
import re
+import time
from typing import Optional, Dict, Any, List
-from openai import OpenAI
+from openai import OpenAI, RateLimitError, APITimeoutError, APIConnectionError, APIStatusError
from ..config import Config
+from .logger import get_logger
+
+logger = get_logger('mirofish.llm_client')
+
+# 可重试的错误类型(网络/限流类瞬时错误)
+_RETRYABLE_ERRORS = (RateLimitError, APITimeoutError, APIConnectionError)
class LLMClient:
"""LLM客户端"""
-
+
def __init__(
self,
api_key: Optional[str] = None,
base_url: Optional[str] = None,
- model: Optional[str] = None
+ model: Optional[str] = None,
+ max_retries: int = 3,
+ retry_delay: float = 2.0,
):
self.api_key = api_key or Config.LLM_API_KEY
self.base_url = base_url or Config.LLM_BASE_URL
self.model = model or Config.LLM_MODEL_NAME
-
+ self.max_retries = max_retries
+ self.retry_delay = retry_delay
+
if not self.api_key:
raise ValueError("LLM_API_KEY 未配置")
-
+
self.client = OpenAI(
api_key=self.api_key,
base_url=self.base_url
)
-
+
def chat(
self,
messages: List[Dict[str, str]],
@@ -40,16 +51,23 @@ def chat(
response_format: Optional[Dict] = None
) -> str:
"""
- 发送聊天请求
-
+ 发送聊天请求(含指数退避重试)
+
Args:
messages: 消息列表
temperature: 温度参数
max_tokens: 最大token数
response_format: 响应格式(如JSON模式)
-
+
Returns:
模型响应文本
+
+ Raises:
+ RateLimitError: 超过速率限制且重试耗尽
+ APITimeoutError: 请求超时且重试耗尽
+ APIConnectionError: 网络连接错误且重试耗尽
+ APIStatusError: 4xx/5xx 不可重试的服务端错误
+ ValueError: LLM_API_KEY 未配置
"""
kwargs = {
"model": self.model,
@@ -57,16 +75,43 @@ def chat(
"temperature": temperature,
"max_tokens": max_tokens,
}
-
+
if response_format:
kwargs["response_format"] = response_format
-
- response = self.client.chat.completions.create(**kwargs)
- content = response.choices[0].message.content
- # 部分模型(如MiniMax M2.5)会在content中包含思考内容,需要移除
- content = re.sub(r'<think>[\s\S]*?</think>', '', content).strip()
- return content
-
+
+ last_error: Exception = RuntimeError("未知错误")
+ for attempt in range(self.max_retries):
+ try:
+ response = self.client.chat.completions.create(**kwargs)
+ content = response.choices[0].message.content
+ # 部分模型(如MiniMax M2.5)会在content中包含思考内容,需要移除
+ content = re.sub(r'<think>[\s\S]*?</think>', '', content).strip()
+ return content
+
+ except _RETRYABLE_ERRORS as e:
+ last_error = e
+ wait = self.retry_delay * (2 ** attempt)
+ logger.warning(
+ f"LLM请求失败(可重试,第 {attempt + 1}/{self.max_retries} 次): "
+ f"{type(e).__name__}: {e}. {wait:.1f}s 后重试..."
+ )
+ if attempt < self.max_retries - 1:
+ time.sleep(wait)
+
+ except APIStatusError as e:
+ # 4xx 客户端错误不重试(认证失败、参数错误等)
+ logger.error(
+ f"LLM API 状态错误 [{e.status_code}]: {e.message}",
+ exc_info=True
+ )
+ raise
+
+ logger.error(
+ f"LLM请求在 {self.max_retries} 次重试后仍失败: {last_error}",
+ exc_info=True
+ )
+ raise last_error
+
def chat_json(
self,
messages: List[Dict[str, str]],
@@ -75,14 +120,17 @@ def chat_json(
) -> Dict[str, Any]:
"""
发送聊天请求并返回JSON
-
+
Args:
messages: 消息列表
temperature: 温度参数
max_tokens: 最大token数
-
+
Returns:
解析后的JSON对象
+
+ Raises:
+ ValueError: LLM返回的内容无法解析为合法JSON
"""
response = self.chat(
messages=messages,
@@ -98,6 +146,5 @@ def chat_json(
try:
return json.loads(cleaned_response)
- except json.JSONDecodeError:
- raise ValueError(f"LLM返回的JSON格式无效: {cleaned_response}")
-
+ except json.JSONDecodeError as e:
+ raise ValueError(f"LLM返回的JSON格式无效: {e}") from e
diff --git a/backend/scripts/ensemble_predict.py b/backend/scripts/ensemble_predict.py
new file mode 100644
index 000000000..25dbd7814
--- /dev/null
+++ b/backend/scripts/ensemble_predict.py
@@ -0,0 +1,274 @@
+"""
+MiroFish Ensemble Prediction Runner
+=====================================
+Runs N independent simulations for the same prediction question,
+then aggregates the probability estimates to reduce variance.
+
+Usage:
+ python ensemble_predict.py --simulation-id sim_xxx --runs 3
+ python ensemble_predict.py --simulation-id sim_xxx --runs 5 --base-url http://localhost:5001
+
+The simulation must already be prepared (profiles generated, config ready).
+This script re-runs the simulation N times, generates a report for each run,
+then outputs an aggregated probability with a confidence interval.
+
+Requirements:
+ pip install requests scipy
+"""
+
+import argparse
+import json
+import statistics
+import time
+import sys
+from typing import Optional
+import requests
+
+
+# ─────────────────────────────────────────
+# API client helpers
+# ─────────────────────────────────────────
+
+def api(base_url: str, method: str, path: str, **kwargs) -> dict:
+ """Simple API wrapper with basic error handling."""
+ url = f"{base_url.rstrip('/')}{path}"
+ resp = getattr(requests, method)(url, timeout=600, **kwargs)
+ resp.raise_for_status()
+ data = resp.json()
+ if not data.get("success", True):
+ raise RuntimeError(f"API error on {path}: {data.get('error', 'unknown')}")
+ return data
+
+
+def poll(base_url: str, path: str, payload: dict,
+ done_statuses: set, fail_statuses: set,
+ interval: int = 10, timeout: int = 1800) -> dict:
+ """Poll a status endpoint until done or failed."""
+ deadline = time.time() + timeout
+ while time.time() < deadline:
+ data = api(base_url, "post", path, json=payload)
+ status = (data.get("data") or data).get("status", "")
+ if status in done_statuses:
+ return data
+ if status in fail_statuses:
+ raise RuntimeError(f"Task failed at {path}: {data}")
+ print(f" … {status}", flush=True)
+ time.sleep(interval)
+ raise TimeoutError(f"Polling {path} timed out after {timeout}s")
+
+
+# ─────────────────────────────────────────
+# Single-run helpers
+# ─────────────────────────────────────────
+
+def start_simulation(base_url: str, simulation_id: str) -> str:
+ """Start a simulation run and return the run_id."""
+ data = api(base_url, "post", "/api/simulation/run/start",
+ json={"simulation_id": simulation_id})
+ run_id = data["data"]["run_id"]
+ print(f" Started run: {run_id}")
+ return run_id
+
+
+def wait_for_simulation(base_url: str, simulation_id: str,
+ run_id: str, interval: int = 15) -> None:
+ """Wait until a simulation run completes."""
+ deadline = time.time() + 3600 # 1 hour max
+ while time.time() < deadline:
+ data = api(base_url, "get",
+ f"/api/simulation/run/status/{simulation_id}/{run_id}")
+ status = data["data"].get("runner_status", "")
+ print(f" … simulation status: {status}", flush=True)
+ if status in {"completed"}:
+ return
+ if status in {"failed", "stopped"}:
+ raise RuntimeError(f"Simulation {run_id} ended with status: {status}")
+ time.sleep(interval)
+ raise TimeoutError("Simulation timed out after 1 hour")
+
+
+def generate_report(base_url: str, simulation_id: str) -> str:
+ """Kick off report generation and return report_id."""
+ data = api(base_url, "post", "/api/report/generate",
+ json={"simulation_id": simulation_id})
+ report_id = data["data"]["report_id"]
+ task_id = data["data"]["task_id"]
+ print(f" Generating report {report_id} (task {task_id}) …")
+
+ # Poll for completion
+ poll(base_url, "/api/report/generate/status",
+ {"task_id": task_id},
+ done_statuses={"completed"},
+ fail_statuses={"failed"},
+ interval=10, timeout=900)
+ return report_id
+
+
+def extract_probability(base_url: str, report_id: str) -> dict:
+ """Extract probability fields from a completed report."""
+ data = api(base_url, "get", f"/api/report/{report_id}")
+ report = data["data"]
+ outline = report.get("outline") or {}
+ return {
+ "report_id": report_id,
+ "predicted_probability": outline.get("predicted_probability"),
+ "probability_low": outline.get("probability_low"),
+ "probability_high": outline.get("probability_high"),
+ "key_upside_factors": outline.get("key_upside_factors", []),
+ "key_downside_factors": outline.get("key_downside_factors", []),
+ "title": report.get("title", ""),
+ }
+
+
+# ─────────────────────────────────────────
+# Aggregation
+# ─────────────────────────────────────────
+
+def aggregate(results: list[dict]) -> dict:
+ """
+ Aggregate probability estimates across N runs.
+
+ Uses:
+ - Point estimate: mean of all predicted_probability values
+ - Uncertainty range: min(probability_low) … max(probability_high),
+ further expanded by ±1 stdev of point estimates across runs
+ - Factor frequency: factors mentioned in 2+ runs are "consensus factors"
+ """
+ points = [r["predicted_probability"] for r in results
+ if r.get("predicted_probability") is not None]
+ lows = [r["probability_low"] for r in results
+ if r.get("probability_low") is not None]
+ highs = [r["probability_high"] for r in results
+ if r.get("probability_high") is not None]
+
+ if not points:
+ return {"error": "No probability estimates could be extracted"}
+
+ mean_p = round(statistics.mean(points))
+ stdev_p = round(statistics.stdev(points)) if len(points) > 1 else 0
+
+ # Confidence interval: widen individual run ranges by cross-run stdev
+ agg_low = max(0, (min(lows) if lows else mean_p) - stdev_p)
+ agg_high = min(100, (max(highs) if highs else mean_p) + stdev_p)
+
+ # Factor frequency
+ upside_counts: dict[str, int] = {}
+ downside_counts: dict[str, int] = {}
+ for r in results:
+ for f in r.get("key_upside_factors", []):
+ upside_counts[f] = upside_counts.get(f, 0) + 1
+ for f in r.get("key_downside_factors", []):
+ downside_counts[f] = downside_counts.get(f, 0) + 1
+
+ # Keep factors mentioned by at least 2 runs (consensus), sorted by frequency
+ n = len(results)
+ consensus_upside = sorted(
+ [f for f, c in upside_counts.items() if c >= min(2, n)],
+ key=lambda f: -upside_counts[f]
+ )
+ consensus_downside = sorted(
+ [f for f, c in downside_counts.items() if c >= min(2, n)],
+ key=lambda f: -downside_counts[f]
+ )
+
+ return {
+ "runs": n,
+ "point_estimates": points,
+ "aggregated_probability": mean_p,
+ "probability_low": round(agg_low),
+ "probability_high": round(agg_high),
+ "stdev_across_runs": stdev_p,
+ "consensus_upside_factors": consensus_upside,
+ "consensus_downside_factors": consensus_downside,
+ }
+
+
+def print_report(agg: dict, results: list[dict]) -> None:
+ bar = "=" * 60
+ print(f"\n{bar}")
+ print(" ENSEMBLE PREDICTION RESULT")
+ print(bar)
+ print(f"\n Runs completed : {agg['runs']}")
+ print(f" Per-run estimates : {agg['point_estimates']}")
+ print(f" Std deviation : ±{agg['stdev_across_runs']}%")
+ print(f"\n ┌─────────────────────────────────────┐")
+ print(f" │ Aggregated probability : {agg['aggregated_probability']:>3}% │")
+ print(f" │ Uncertainty range    : {agg['probability_low']}% – {agg['probability_high']}% │")
+ print(f" └─────────────────────────────────────┘")
+
+ if agg.get("consensus_upside_factors"):
+ print("\n Consensus upside factors (raise probability):")
+ for f in agg["consensus_upside_factors"][:5]:
+ print(f" + {f}")
+
+ if agg.get("consensus_downside_factors"):
+ print("\n Consensus downside factors (lower probability):")
+ for f in agg["consensus_downside_factors"][:5]:
+ print(f" - {f}")
+
+ print(f"\n Individual report IDs:")
+ for r in results:
+ p = r.get("predicted_probability", "N/A")
+ lo = r.get("probability_low", "?")
+ hi = r.get("probability_high", "?")
+ print(f" {r['report_id']} → {p}% ({lo}%–{hi}%)")
+
+ print(f"\n{bar}\n")
+
+
+# ─────────────────────────────────────────
+# Main
+# ─────────────────────────────────────────
+
+def main():
+ parser = argparse.ArgumentParser(
+ description="Run N independent simulations and aggregate probability estimates"
+ )
+ parser.add_argument("--simulation-id", required=True,
+ help="ID of a prepared simulation (profiles + config must exist)")
+ parser.add_argument("--runs", type=int, default=3,
+ help="Number of independent simulation runs (default: 3)")
+ parser.add_argument("--base-url", default="http://localhost:5001",
+ help="MiroFish backend URL (default: http://localhost:5001)")
+ parser.add_argument("--output", default=None,
+ help="Optional path to write JSON results")
+ args = parser.parse_args()
+
+ if args.runs < 2:
+ print("Warning: --runs should be at least 2 for meaningful aggregation")
+
+ print(f"\nEnsemble runner: {args.runs} runs for simulation {args.simulation_id}")
+ print(f"Backend: {args.base_url}\n")
+
+ results = []
+ for i in range(1, args.runs + 1):
+ print(f"─── Run {i}/{args.runs} ───")
+ try:
+ run_id = start_simulation(args.base_url, args.simulation_id)
+ wait_for_simulation(args.base_url, args.simulation_id, run_id)
+ report_id = generate_report(args.base_url, args.simulation_id)
+ prob = extract_probability(args.base_url, report_id)
+ print(f" ✓ probability = {prob['predicted_probability']}%"
+ f" range [{prob.get('probability_low')}%–{prob.get('probability_high')}%]")
+ results.append(prob)
+ except Exception as e:
+ print(f" ✗ Run {i} failed: {e}", file=sys.stderr)
+
+ if not results:
+ print("All runs failed. Exiting.", file=sys.stderr)
+ sys.exit(1)
+
+ agg = aggregate(results)
+ print_report(agg, results)
+
+ output = {"aggregated": agg, "individual_runs": results}
+ if args.output:
+ with open(args.output, "w") as f:
+ json.dump(output, f, indent=2, ensure_ascii=False)
+ print(f"Results written to {args.output}")
+
+ return output
+
+
+if __name__ == "__main__":
+ main()
diff --git a/comparison_demo.py b/comparison_demo.py
new file mode 100644
index 000000000..bda247770
--- /dev/null
+++ b/comparison_demo.py
@@ -0,0 +1,285 @@
+"""
+MiroFish 代码改进前后对比演示
+=====================================
+本脚本模拟四个关键改进点的 "改进前 vs 改进后" 行为,
+无需真实的 LLM / Zep 凭证,可独立运行。
+
+运行方式:
+ python comparison_demo.py
+"""
+
+import json
+import time
+import traceback
+import textwrap
+from pathlib import Path
+import tempfile
+import os
+
+# ─────────────────────────────────────────
+# 工具函数
+# ─────────────────────────────────────────
+
+def section(title: str):
+ bar = "=" * 60
+ print(f"\n{bar}")
+ print(f" {title}")
+ print(bar)
+
+def label(tag: str, text: str, *, indent: int = 2):
+ pad = " " * indent
+ print(f"{pad}[{tag}] {text}")
+
+def show_json(obj: dict, *, indent: int = 4):
+ lines = json.dumps(obj, ensure_ascii=False, indent=2).splitlines()
+ for line in lines:
+ print(" " * indent + line)
+
+# ─────────────────────────────────────────
+# 1. 内部堆栈暴露 (Traceback Exposure)
+# ─────────────────────────────────────────
+
+section("改进 1 — 移除 HTTP 响应中的堆栈信息 (Traceback Exposure)")
+
+def simulate_api_call_before():
+ """改进前:把完整堆栈返回给客户端"""
+ try:
+ raise ValueError("ZEP_API_KEY 未配置")
+ except Exception as e:
+ return {
+ "success": False,
+ "error": str(e),
+ "traceback": traceback.format_exc() # ← 直接暴露内部堆栈
+ }, 500
+
+def simulate_api_call_after():
+ """改进后:堆栈只写服务端日志,客户端只收到 error 摘要"""
+ try:
+ raise ValueError("ZEP_API_KEY 未配置")
+ except Exception as e:
+ # 服务端日志(含完整 traceback)
+ # logger.error(f"操作失败: {str(e)}", exc_info=True)
+ return {
+ "success": False,
+ "error": str(e) # ← 仅暴露安全摘要
+ }, 500
+
+resp_before, _ = simulate_api_call_before()
+resp_after, _ = simulate_api_call_after()
+
+label("改进前", "客户端收到的 JSON 响应:")
+show_json(resp_before)
+
+print()
+label("改进后", "客户端收到的 JSON 响应:")
+show_json(resp_after)
+
+tb_lines = resp_before.get("traceback", "").strip().splitlines()
+print(f"\n ⚠ 改进前泄漏了 {len(tb_lines)} 行堆栈信息(含文件路径、行号、变量名)")
+print( " ✓ 改进后堆栈信息仅写入服务端日志,攻击者无法通过响应体推断内部结构")
+
+# ─────────────────────────────────────────
+# 2. CORS 配置 (Wildcard vs. Allowlist)
+# ─────────────────────────────────────────
+
+section("改进 2 — CORS 来源限制(通配符 → 白名单)")
+
+CORS_BEFORE = {"origins": "*"}
+CORS_AFTER = {"origins": ["http://localhost:5173", "http://localhost:5001"]}
+
+# 模拟浏览器跨域请求
+def check_cors(config: dict, request_origin: str) -> bool:
+ allowed = config["origins"]
+ if allowed == "*":
+ return True
+ return request_origin in allowed
+
+test_origins = [
+ ("http://localhost:5173", "合法来源(前端开发服务器)"),
+ ("http://localhost:5001", "合法来源(后端自测)"),
+ ("https://attacker.example", "恶意第三方网站"),
+ ("https://phishing-mirofish.io", "仿冒域名"),
+]
+
+label("改进前", f"CORS 配置: origins = \"{CORS_BEFORE['origins']}\"")
+for origin, desc in test_origins:
+ ok = check_cors(CORS_BEFORE, origin)
+ status = "✓ 允许" if ok else "✗ 拒绝"
+ print(f" {status} {origin:<42} ({desc})")
+
+print()
+label("改进后", f"CORS 配置: origins = {CORS_AFTER['origins']}")
+for origin, desc in test_origins:
+ ok = check_cors(CORS_AFTER, origin)
+ status = "✓ 允许" if ok else "✗ 拒绝"
+ print(f" {status} {origin:<42} ({desc})")
+
+print("\n ✓ 现在可通过 CORS_ORIGINS 环境变量按部署环境配置允许来源")
+
+# ─────────────────────────────────────────
+# 3. LLM 客户端重试逻辑 (Retry Logic)
+# ─────────────────────────────────────────
+
+section("改进 3 — LLMClient 指数退避重试 & 精确异常类型")
+
+class FakeRateLimitError(Exception):
+ """模拟 openai.RateLimitError"""
+
+class FakeAPIStatusError(Exception):
+ """模拟 openai.APIStatusError (4xx)"""
+ def __init__(self, status_code: int, message: str):
+ self.status_code = status_code
+ self.message = message
+
+_RETRYABLE = (FakeRateLimitError,)
+
+# ── 改进前 ──
+def llm_chat_before(call_fn):
+ """改进前:catch-all Exception,无重试,失败即抛"""
+ try:
+ return call_fn()
+ except Exception as e:
+ raise RuntimeError(f"LLM调用失败: {e}") from e
+
+# ── 改进后 ──
+def llm_chat_after(call_fn, max_retries: int = 3, retry_delay: float = 0.05):
+ """改进后:可重试错误指数退避,不可重试错误立即抛出"""
+ last_error = None
+ for attempt in range(max_retries):
+ try:
+ return call_fn()
+ except _RETRYABLE as e:
+ last_error = e
+ wait = retry_delay * (2 ** attempt)
+ print(f" ↻ 第 {attempt+1}/{max_retries} 次重试(等待 {wait*1000:.0f}ms): {type(e).__name__}")
+ time.sleep(wait)
+ except FakeAPIStatusError as e:
+ print(f" ✗ 不可重试的 API 错误 [{e.status_code}]: {e.message}")
+ raise
+ raise RuntimeError(f"重试耗尽: {last_error}")
+
+# 场景 A:速率限制(应重试后成功)
+attempts_a = {"count": 0}
+def call_with_rate_limit():
+ attempts_a["count"] += 1
+ if attempts_a["count"] < 3:
+ raise FakeRateLimitError("rate limit exceeded")
+ return "模拟 LLM 回复: 这是一个关于气候变化的深度分析..."
+
+label("改进前", "速率限制错误(无重试):")
+try:
+ result = llm_chat_before(call_with_rate_limit)
+except Exception as e:
+ print(f" → 直接抛出异常,请求永久失败: {e}")
+
+print()
+attempts_a["count"] = 0 # 重置计数
+label("改进后", "速率限制错误(指数退避重试):")
+try:
+ result = llm_chat_after(call_with_rate_limit)
+ print(f" ✓ 第 {attempts_a['count']} 次尝试成功: {result[:40]}...")
+except Exception as e:
+ print(f" ✗ {e}")
+
+# 场景 B:认证错误(不应重试)
+print()
+attempts_b = {"count": 0}
+def call_with_auth_error():
+ attempts_b["count"] += 1
+ raise FakeAPIStatusError(401, "Invalid API key")
+
+label("改进后", "认证失败(401)— 不可重试,立即抛出:")
+try:
+ llm_chat_after(call_with_auth_error)
+except FakeAPIStatusError as e:
+ print(f" ✓ 正确地立即失败(不浪费 {attempts_b['count']-1} 次无效重试)")
+
+# ─────────────────────────────────────────
+# 4. 文件上传验证 (MIME-Type Validation)
+# ─────────────────────────────────────────
+
+section("改进 4 — 文件上传内容验证(扩展名欺骗防护)")
+
+_PDF_MAGIC = b'%PDF'
+_BLOCKED = (b'MZ', b'\x7fELF', b'PK\x03\x04')
+_MAX_SIZE = 50 * 1024 * 1024
+
+# ── 改进前 ──
+def validate_before(filename: str) -> bool:
+ """仅检查扩展名"""
+ if '.' not in filename:
+ return False
+ ext = filename.rsplit('.', 1)[1].lower()
+ return ext in {'pdf', 'md', 'txt', 'markdown'}
+
+# ── 改进后 ──
+def validate_after(filename: str, file_bytes: bytes) -> bool:
+ """同时验证扩展名 + 文件魔数"""
+ if '.' not in filename:
+ return False
+ ext = filename.rsplit('.', 1)[1].lower()
+ if ext not in {'pdf', 'md', 'txt', 'markdown'}:
+ return False
+ header = file_bytes[:8]
+ for magic in _BLOCKED:
+ if header.startswith(magic):
+ raise ValueError(f"检测到被拒绝的文件类型(魔数 {magic!r})")
+ if ext == 'pdf' and not header.startswith(_PDF_MAGIC):
+ raise ValueError("文件内容与 .pdf 扩展名不符(缺少 %PDF 魔数)")
+ return True
+
+# 准备测试用例
+test_cases = [
+ ("report.pdf", _PDF_MAGIC + b"-1.7 %\xe2\xe3", "合法 PDF"),
+ ("notes.md", b"# Hello World\n", "合法 Markdown"),
+ ("malware.pdf", b'MZ\x90\x00\x03\x00', "Windows PE 改名为 .pdf"),
+ ("archive.pdf", b'PK\x03\x04\x14\x00', "ZIP 改名为 .pdf"),
+ ("script.txt", b'\x7fELF\x02\x01', "ELF 二进制改名为 .txt"),
+ ("fake.pdf", b'This is just text', "纯文本改名为 .pdf"),
+]
+
+label("改进前", "仅验证扩展名(所有测试文件均通过):")
+for fname, fbytes, desc in test_cases:
+ ok = validate_before(fname)
+ status = "✓ 通过" if ok else "✗ 拒绝"
+ print(f" {status} {fname:<20} ({desc})")
+
+print()
+label("改进后", "同时验证扩展名 + 文件魔数:")
+for fname, fbytes, desc in test_cases:
+ try:
+ ok = validate_after(fname, fbytes)
+ status = "✓ 通过" if ok else "✗ 拒绝"
+ reason = ""
+ except ValueError as e:
+ status = "✗ 拒绝"
+ reason = f" ← {e}"
+ print(f" {status} {fname:<20} ({desc}){reason}")
+
+# ─────────────────────────────────────────
+# 总结
+# ─────────────────────────────────────────
+
+section("改进总结")
+
+improvements = [
+ ("安全", "移除 HTTP 响应中的内部堆栈信息",
+ "51 处 traceback.format_exc() 从 API 响应中移除,改为 exc_info=True 写入服务端日志"),
+ ("安全", "CORS 来源白名单替代通配符",
+ "origins='*' → 可配置白名单(CORS_ORIGINS 环境变量),默认限制为 localhost 开发端口"),
+ ("可靠性", "LLMClient 指数退避重试",
+ "新增 RateLimitError / APITimeoutError / APIConnectionError 的自动重试(最多 3 次,间隔 2/4/8s)"),
+ ("安全", "文件上传 MIME 魔数验证",
+ "在扩展名检查之外增加文件头魔数校验,阻止 PE/ELF/ZIP 伪装为 pdf/txt 上传"),
+ ("安全", "DEBUG 模式默认关闭",
+ "FLASK_DEBUG 默认值从 'True' 改为 'False',防止生产环境意外暴露调试信息"),
+]
+
+for category, title, detail in improvements:
+ print(f"\n [{category}] {title}")
+ wrapped = textwrap.fill(detail, width=72, initial_indent=" ", subsequent_indent=" ")
+ print(wrapped)
+
+print("\n" + "=" * 60)
+print(" 所有改进均已应用到 branch: claude/code-review-improvements-WaDYG")
+print("=" * 60 + "\n")