diff --git a/.env.example b/.env.example
index 78a3b72c0..faaca2456 100644
--- a/.env.example
+++ b/.env.example
@@ -1,3 +1,7 @@
+# ===== 语言配置 =====
+# 界面和提示词语言,可选: zh (中文, 默认), en (English), de (Deutsch)
+LANGUAGE=zh
+
# LLM API配置(支持 OpenAI SDK 格式的任意 LLM API)
# 推荐使用阿里百炼平台qwen-plus模型:https://bailian.console.aliyun.com/
# 注意消耗较大,可先进行小于40轮的模拟尝试
diff --git a/backend/app/__init__.py b/backend/app/__init__.py
index aba624bba..dde31361c 100644
--- a/backend/app/__init__.py
+++ b/backend/app/__init__.py
@@ -64,9 +64,11 @@ def log_response(response):
# 注册蓝图
from .api import graph_bp, simulation_bp, report_bp
+ from .api.config_api import config_bp
app.register_blueprint(graph_bp, url_prefix='/api/graph')
app.register_blueprint(simulation_bp, url_prefix='/api/simulation')
app.register_blueprint(report_bp, url_prefix='/api/report')
+ app.register_blueprint(config_bp, url_prefix='/api/config')
# 健康检查
@app.route('/health')
diff --git a/backend/app/api/config_api.py b/backend/app/api/config_api.py
new file mode 100644
index 000000000..9a8234efa
--- /dev/null
+++ b/backend/app/api/config_api.py
@@ -0,0 +1,15 @@
+"""Configuration API: exposes backend i18n settings to the frontend."""
+from flask import Blueprint, jsonify
+from app.i18n import get_all_patterns, get_language
+
+config_bp = Blueprint('config', __name__)
+
+@config_bp.route('/language', methods=['GET'])
+def get_lang():
+    """Return the active backend language code, e.g. {"language": "zh"}."""
+    return jsonify({'language': get_language()})
+
+@config_bp.route('/patterns', methods=['GET'])
+def get_patterns():
+    """Return the active language pack's regex patterns for frontend parsing."""
+    return jsonify({'patterns': get_all_patterns()})
diff --git a/backend/app/api/simulation.py b/backend/app/api/simulation.py
index 3a0f68168..9ce71c911 100644
--- a/backend/app/api/simulation.py
+++ b/backend/app/api/simulation.py
@@ -15,13 +15,14 @@
from ..services.simulation_runner import SimulationRunner, RunnerStatus
from ..utils.logger import get_logger
from ..models.project import ProjectManager
+from ..i18n import get_prompt, get_format, get_string
logger = get_logger('mirofish.api.simulation')
# Interview prompt 优化前缀
# 添加此前缀可以避免Agent调用工具,直接用文本回复
-INTERVIEW_PROMPT_PREFIX = "结合你的人设、所有的过往记忆与行动,不调用任何工具直接用文本回复我:"
+INTERVIEW_PROMPT_PREFIX = get_prompt('interview_api_prefix')
def optimize_interview_prompt(prompt: str) -> str:
diff --git a/backend/app/config.py b/backend/app/config.py
index 953dfa50a..ea2c0c614 100644
--- a/backend/app/config.py
+++ b/backend/app/config.py
@@ -19,10 +19,13 @@
class Config:
"""Flask配置类"""
-
+
# Flask配置
SECRET_KEY = os.environ.get('SECRET_KEY', 'mirofish-secret-key')
DEBUG = os.environ.get('FLASK_DEBUG', 'True').lower() == 'true'
+
+ # 语言配置 (zh=中文, en=English, de=Deutsch)
+ LANGUAGE = os.environ.get('LANGUAGE', 'zh')
# JSON配置 - 禁用ASCII转义,让中文直接显示(而不是 \uXXXX 格式)
JSON_AS_ASCII = False
diff --git a/backend/app/i18n/__init__.py b/backend/app/i18n/__init__.py
new file mode 100644
index 000000000..fc22f6166
--- /dev/null
+++ b/backend/app/i18n/__init__.py
@@ -0,0 +1,56 @@
+"""
+MiroFish i18n - internationalization helpers.
+
+The active language is selected via the LANGUAGE environment variable.
+Available languages: zh (default), en, de.
+"""
+import os
+import importlib
+from typing import Dict
+
+# Active language code; reset to 'zh' on first lookup if no matching
+# language pack module exists (see _get_module).
+LANGUAGE = os.getenv('LANGUAGE', 'zh')
+_cache = {}
+
+def _get_module():
+    """Import and cache the language pack module for LANGUAGE."""
+    global LANGUAGE
+    if LANGUAGE not in _cache:
+        try:
+            _cache[LANGUAGE] = importlib.import_module(f'.{LANGUAGE}', package='app.i18n')
+        except ModuleNotFoundError:
+            # Unknown language: fall back to zh and make LANGUAGE reflect
+            # the pack actually loaded, so get_language() stays consistent
+            # with the strings/patterns served to the frontend.
+            LANGUAGE = 'zh'
+            _cache.setdefault('zh', importlib.import_module('.zh', package='app.i18n'))
+    return _cache[LANGUAGE]
+
+def get_prompt(key: str) -> str:
+    """Get a prompt template by key."""
+    return _get_module().PROMPTS[key]
+
+def get_format(key: str) -> str:
+    """Get a format string by key."""
+    return _get_module().FORMATS[key]
+
+def get_string(key: str, **kwargs) -> str:
+    """Get a UI/status string, optionally with format args."""
+    s = _get_module().STRINGS[key]
+    return s.format(**kwargs) if kwargs else s
+
+def get_pattern(key: str) -> str:
+    """Get a regex pattern by key (for frontend)."""
+    return _get_module().PATTERNS[key]
+
+def get_all_formats() -> Dict[str, str]:
+    """Return a copy of all format strings for the active language."""
+    return dict(_get_module().FORMATS)
+
+def get_all_patterns() -> Dict[str, str]:
+    """Return a copy of all regex patterns for the active language."""
+    return dict(_get_module().PATTERNS)
+
+def get_language() -> str:
+    """Return the active language code ('zh', 'en', or 'de')."""
+    return LANGUAGE
diff --git a/backend/app/i18n/de.py b/backend/app/i18n/de.py
new file mode 100644
index 000000000..c1c044a03
--- /dev/null
+++ b/backend/app/i18n/de.py
@@ -0,0 +1,997 @@
+"""German language pack / Deutsches Sprachpaket"""
+
+# ═══════════════════════════════════════════════════════════════
+# PROMPTS - Long prompt templates used by services
+# ═══════════════════════════════════════════════════════════════
+
+PROMPTS = {
+
+ # ── report_agent.py: Plan outline ──
+
+ 'report_plan_system': """\
+Sie sind ein Experte fuer das Verfassen von \u201eZukunftsvorhersage-Berichten\u201c und verfuegen ueber eine \u201eGoetterperspektive\u201c auf die Simulationswelt \u2014 Sie koennen das Verhalten, die Aeusserungen und Interaktionen jedes einzelnen Agents in der Simulation durchschauen.
+
+\u3010Kernkonzept\u3011
+Wir haben eine Simulationswelt aufgebaut und spezifische \u201eSimulationsanforderungen\u201c als Variablen eingespeist. Die Evolutionsergebnisse der Simulationswelt sind Vorhersagen ueber moegliche zukuenftige Entwicklungen. Was Sie beobachten, sind keine "Experimentdaten", sondern eine "Generalprobe der Zukunft".
+
+\u3010Ihre Aufgabe\u3011
+Verfassen Sie einen \u201eZukunftsvorhersage-Bericht\u201c, der folgende Fragen beantwortet:
+1. Was ist unter unseren festgelegten Bedingungen in der Zukunft geschehen?
+2. Wie haben die verschiedenen Agent-Typen (Gruppen) reagiert und gehandelt?
+3. Welche bemerkenswerten Zukunftstrends und Risiken hat diese Simulation aufgedeckt?
+
+\u3010Berichtspositionierung\u3011
+- \u2705 Dies ist ein simulationsbasierter Zukunftsvorhersage-Bericht, der aufzeigt "Was waere, wenn..."
+- \u2705 Fokus auf Vorhersageergebnisse: Ereignisentwicklung, Gruppenreaktionen, emergente Phaenomene, potenzielle Risiken
+- \u2705 Das Verhalten und die Aeusserungen der Agents in der Simulationswelt sind Vorhersagen ueber zukuenftiges Gruppenverhalten
+- \u274c Keine Analyse des aktuellen Zustands der realen Welt
+- \u274c Keine allgemeine Meinungsueberblick-Zusammenfassung
+
+\u3010Kapitelanzahl-Beschraenkung\u3011
+- Mindestens 2 Kapitel, maximal 5 Kapitel
+- Keine Unterkapitel noetig, jedes Kapitel wird direkt mit vollstaendigem Inhalt verfasst
+- Der Inhalt soll praegnant sein und sich auf die Kernvorhersage-Erkenntnisse konzentrieren
+- Die Kapitelstruktur gestalten Sie eigenstaendig basierend auf den Vorhersageergebnissen
+
+Bitte geben Sie die Berichtsgliederung im JSON-Format aus, wie folgt:
+{
+ "title": "Berichtstitel",
+ "summary": "Berichtszusammenfassung (Kernvorhersage in einem Satz)",
+ "sections": [
+ {
+ "title": "Kapiteltitel",
+ "description": "Beschreibung des Kapitelinhalts"
+ }
+ ]
+}
+
+Hinweis: Das sections-Array muss mindestens 2 und maximal 5 Elemente enthalten!""",
+
+ 'report_plan_user': """\
+\u3010Vorhersageszenario-Einstellung\u3011
+Die in die Simulationswelt eingespeiste Variable (Simulationsanforderung): {simulation_requirement}
+
+\u3010Simulationswelt-Umfang\u3011
+- Anzahl der an der Simulation beteiligten Entitaeten: {total_nodes}
+- Anzahl der zwischen Entitaeten entstandenen Beziehungen: {total_edges}
+- Verteilung der Entitaetstypen: {entity_types}
+- Anzahl aktiver Agents: {total_entities}
+
+\u3010Stichprobe vorhergesagter Zukunftsfakten aus der Simulation\u3011
+{related_facts_json}
+
+Betrachten Sie diese Generalprobe der Zukunft aus der \u201eGoetterperspektive\u201c:
+1. Welchen Zustand zeigt die Zukunft unter unseren festgelegten Bedingungen?
+2. Wie haben die verschiedenen Gruppen (Agents) reagiert und gehandelt?
+3. Welche bemerkenswerten Zukunftstrends hat diese Simulation aufgedeckt?
+
+Gestalten Sie basierend auf den Vorhersageergebnissen die am besten geeignete Berichtskapitelstruktur.
+
+\u3010Nochmalige Erinnerung\u3011Anzahl der Berichtskapitel: mindestens 2, maximal 5, der Inhalt soll praegnant und auf die Kernvorhersage-Erkenntnisse fokussiert sein.""",
+
+ # ── report_agent.py: Section generation ──
+
+ 'report_section_system': """\
+Sie sind ein Experte fuer das Verfassen von \u201eZukunftsvorhersage-Berichten\u201c und verfassen gerade ein Kapitel des Berichts.
+
+Berichtstitel: {report_title}
+Berichtszusammenfassung: {report_summary}
+Vorhersageszenario (Simulationsanforderung): {simulation_requirement}
+
+Aktuell zu verfassendes Kapitel: {section_title}
+
+\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550
+\u3010Kernkonzept\u3011
+\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550
+
+Die Simulationswelt ist eine Generalprobe der Zukunft. Wir haben spezifische Bedingungen (Simulationsanforderungen) in die Simulationswelt eingespeist.
+Das Verhalten und die Interaktionen der Agents in der Simulation sind Vorhersagen ueber zukuenftiges Gruppenverhalten.
+
+Ihre Aufgabe ist:
+- Aufzuzeigen, was unter den festgelegten Bedingungen in der Zukunft geschehen ist
+- Vorherzusagen, wie die verschiedenen Gruppen (Agents) reagiert und gehandelt haben
+- Bemerkenswerte Zukunftstrends, Risiken und Chancen zu entdecken
+
+\u274c Schreiben Sie keine Analyse des aktuellen Zustands der realen Welt
+\u2705 Fokussieren Sie sich auf "Wie wird die Zukunft aussehen" \u2014 die Simulationsergebnisse sind die vorhergesagte Zukunft
+
+\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550
+\u3010Wichtigste Regeln - Muessen eingehalten werden\u3011
+\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550
+
+1. \u3010Werkzeuge muessen aufgerufen werden, um die Simulationswelt zu beobachten\u3011
+ - Sie beobachten die Generalprobe der Zukunft aus der \u201eGoetterperspektive\u201c
+ - Alle Inhalte muessen aus Ereignissen und Agent-Verhalten in der Simulationswelt stammen
+ - Es ist verboten, eigenes Wissen fuer den Berichtsinhalt zu verwenden
+ - Pro Kapitel mindestens 3 Werkzeugaufrufe (maximal 5), um die simulierte Welt zu beobachten, die die Zukunft repraesentiert
+
+2. \u3010Originale Aeusserungen und Handlungen der Agents muessen zitiert werden\u3011
+ - Die Aeusserungen und das Verhalten der Agents sind Vorhersagen ueber zukuenftiges Gruppenverhalten
+ - Verwenden Sie Zitatformate im Bericht, um diese Vorhersagen darzustellen, zum Beispiel:
+ > "Eine bestimmte Gruppe wuerde aeussern: Originaltext..."
+ - Diese Zitate sind die Kernbelege der Simulationsvorhersage
+
+3. \u3010Sprachkonsistenz - Zitierte Inhalte muessen in die Berichtssprache uebersetzt werden\u3011
+ - Die von Werkzeugen zurueckgegebenen Inhalte koennen englische oder gemischtsprachige Formulierungen enthalten
+ - Der Bericht muss vollstaendig auf Deutsch verfasst werden
+ - Wenn Sie englische oder gemischtsprachige Inhalte aus Werkzeugrueckgaben zitieren, muessen diese in fluessiges Deutsch uebersetzt werden, bevor sie in den Bericht aufgenommen werden
+ - Behalten Sie beim Uebersetzen die urspruengliche Bedeutung bei und stellen Sie sicher, dass die Formulierung natuerlich und fluessig ist
+ - Diese Regel gilt sowohl fuer den Fliesstext als auch fuer Zitatbloecke (> Format)
+
+4. \u3010Vorhersageergebnisse wahrheitsgetreu darstellen\u3011
+ - Der Berichtsinhalt muss die Simulationsergebnisse widerspiegeln, die die Zukunft repraesentieren
+ - Fuegen Sie keine Informationen hinzu, die in der Simulation nicht existieren
+ - Wenn Informationen in einem Bereich unzureichend sind, geben Sie dies ehrlich an
+
+\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550
+\u3010\u26a0\ufe0f Formatvorgaben - Aeusserst wichtig!\u3011
+\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550
+
+\u3010Ein Kapitel = Kleinste Inhaltseinheit\u3011
+- Jedes Kapitel ist die kleinste Aufteilungseinheit des Berichts
+- \u274c Verboten: Jegliche Markdown-Ueberschriften innerhalb eines Kapitels (#, ##, ###, #### usw.)
+- \u274c Verboten: Kapitelhauptueberschrift am Inhaltsanfang hinzufuegen
+- \u2705 Kapitelueberschriften werden automatisch vom System hinzugefuegt, Sie muessen nur den reinen Fliesstext verfassen
+- \u2705 Verwenden Sie **Fettdruck**, Absatztrennung, Zitate und Listen zur Inhaltsorganisation, aber keine Ueberschriften
+
+\u3010Korrektes Beispiel\u3011
+```
+Dieses Kapitel analysiert die Meinungsverbreitungsdynamik des Ereignisses. Durch eingehende Analyse der Simulationsdaten haben wir festgestellt...
+
+**Initiale Ausloesephase**
+
+Die erste Plattform uebernahm die Kernfunktion der Erstveroeffentlichung:
+
+> "Die Plattform trug 68% des initialen Stimmungsvolumens bei..."
+
+**Emotionsverstaerkungsphase**
+
+Eine zweite Plattform verstaerkte die Ereigniswirkung weiter:
+
+- Starke visuelle Wirkung
+- Hohe emotionale Resonanz
+```
+
+\u3010Falsches Beispiel\u3011
+```
+## Zusammenfassung \u2190 Falsch! Keine Ueberschriften hinzufuegen
+### 1. Initiale Phase \u2190 Falsch! Keine ### fuer Unterabschnitte
+#### 1.1 Detailanalyse \u2190 Falsch! Keine #### fuer Untergliederung
+
+Dieses Kapitel analysiert...
+```
+
+\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550
+\u3010Verfuegbare Recherchewerkzeuge\u3011(pro Kapitel 3-5 Aufrufe)
+\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550
+
+{tools_description}
+
+\u3010Werkzeugnutzungs-Empfehlung - Bitte verschiedene Werkzeuge kombinieren, nicht nur eines verwenden\u3011
+- insight_forge: Tiefenanalyse, automatische Problemzerlegung und mehrdimensionale Fakten- und Beziehungsrecherche
+- panorama_search: Weitwinkel-Panoramasuche, Ereignisgesamtbild, Zeitlinie und Entwicklungsprozess verstehen
+- quick_search: Schnelle Verifizierung eines bestimmten Informationspunkts
+- interview_agents: Simulations-Agents befragen, Erstperson-Perspektiven verschiedener Rollen und echte Reaktionen erhalten
+
+\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550
+\u3010Arbeitsablauf\u3011
+\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550
+
+Bei jeder Antwort koennen Sie nur eine der folgenden zwei Aktionen ausfuehren (nicht gleichzeitig):
+
+Option A - Werkzeug aufrufen:
+Geben Sie Ihre Ueberlegungen aus und rufen Sie dann ein Werkzeug im folgenden Format auf:
+
+{{"name": "Werkzeugname", "parameters": {{"Parametername": "Parameterwert"}}}}
+
+Das System fuehrt das Werkzeug aus und gibt Ihnen das Ergebnis zurueck. Sie muessen und koennen keine Werkzeugergebnisse selbst verfassen.
+
+Option B - Endgueltigen Inhalt ausgeben:
+Wenn Sie durch Werkzeuge genuegend Informationen erhalten haben, geben Sie den Kapitelinhalt mit "Final Answer:" am Anfang aus.
+
+\u26a0\ufe0f Streng verboten:
+- Verboten: In einer Antwort gleichzeitig Werkzeugaufruf und Final Answer
+- Verboten: Werkzeugrueckgabeergebnisse (Observation) selbst erfinden, alle Werkzeugergebnisse werden vom System eingefuegt
+- Pro Antwort maximal ein Werkzeug aufrufen
+
+\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550
+\u3010Anforderungen an den Kapitelinhalt\u3011
+\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550
+
+1. Der Inhalt muss auf den durch Werkzeuge recherchierten Simulationsdaten basieren
+2. Reichlich Originaltexte zitieren, um Simulationseffekte darzustellen
+3. Markdown-Format verwenden (aber keine Ueberschriften):
+ - **Fetten Text** zur Hervorhebung verwenden (anstelle von Unterueberschriften)
+ - Listen (- oder 1.2.3.) zur Organisation von Kernpunkten verwenden
+ - Leerzeilen zur Trennung verschiedener Absaetze verwenden
+ - \u274c Verboten: #, ##, ###, #### oder jede andere Ueberschriftensyntax
+4. \u3010Zitatformat-Vorgabe - Muss als eigenstaendiger Absatz stehen\u3011
+ Zitate muessen als eigenstaendige Absaetze stehen, mit je einer Leerzeile davor und danach, nicht in einen Absatz eingemischt:
+
+ \u2705 Korrektes Format:
+ ```
+ Die Reaktion der Behoerde wurde als inhaltsleer empfunden.
+
+ > "Das Reaktionsmuster der Behoerde wirkt in der schnelllebigen Social-Media-Umgebung starr und traege."
+
+ Diese Bewertung spiegelt die allgemeine Unzufriedenheit der Oeffentlichkeit wider.
+ ```
+
+ \u274c Falsches Format:
+ ```
+ Die Reaktion der Behoerde wurde als inhaltsleer empfunden. > "Das Reaktionsmuster der Behoerde..." Diese Bewertung spiegelt...
+ ```
+5. Logische Kohaerenz mit anderen Kapiteln beibehalten
+6. \u3010Wiederholungen vermeiden\u3011Lesen Sie die unten bereits verfassten Kapitelinhalte sorgfaeltig und beschreiben Sie nicht dieselben Informationen erneut
+7. \u3010Nochmalige Betonung\u3011Keine Ueberschriften hinzufuegen! **Fettdruck** anstelle von Unterabschnittsueberschriften verwenden""",
+
+ 'report_section_user': """\
+Bereits verfasste Kapitelinhalte (bitte sorgfaeltig lesen, Wiederholungen vermeiden):
+{previous_content}
+
+\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550
+\u3010Aktuelle Aufgabe\u3011Kapitel verfassen: {section_title}
+\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550
+
+\u3010Wichtiger Hinweis\u3011
+1. Lesen Sie die oben bereits verfassten Kapitel sorgfaeltig, um Wiederholungen zu vermeiden!
+2. Vor dem Start muessen zuerst Werkzeuge aufgerufen werden, um Simulationsdaten zu erhalten
+3. Bitte verwenden Sie verschiedene Werkzeuge kombiniert, nicht nur eines
+4. Der Berichtsinhalt muss aus Rechercheergebnissen stammen, verwenden Sie nicht Ihr eigenes Wissen
+
+\u3010\u26a0\ufe0f Formatwarnung - Muss eingehalten werden\u3011
+- \u274c Keine Ueberschriften schreiben (#, ##, ###, #### sind alle verboten)
+- \u274c Nicht "{section_title}" als Anfang schreiben
+- \u2705 Kapitelueberschriften werden automatisch vom System hinzugefuegt
+- \u2705 Direkt den Fliesstext schreiben, **Fettdruck** anstelle von Unterabschnittsueberschriften verwenden
+
+Bitte beginnen Sie:
+1. Zuerst ueberlegen (Thought), welche Informationen dieses Kapitel benoetigt
+2. Dann Werkzeuge aufrufen (Action), um Simulationsdaten zu erhalten
+3. Nach ausreichender Informationssammlung Final Answer ausgeben (reiner Fliesstext, ohne jegliche Ueberschriften)""",
+
+ # ── report_agent.py: Tool descriptions ──
+
+ 'tool_desc_insight_forge': """\
+\u3010Tiefenanalyse-Suche - Leistungsstarkes Recherchewerkzeug\u3011
+Dies ist unsere leistungsstarke Recherchefunktion, speziell fuer Tiefenanalysen konzipiert. Sie wird:
+1. Ihre Frage automatisch in mehrere Teilfragen zerlegen
+2. Informationen aus dem Simulationsgraphen aus mehreren Dimensionen abrufen
+3. Ergebnisse aus semantischer Suche, Entitaetsanalyse und Beziehungskettenverfolgung integrieren
+4. Die umfassendsten und tiefgruendigsten Rechercheergebnisse liefern
+
+\u3010Einsatzszenarien\u3011
+- Wenn ein Thema eingehend analysiert werden muss
+- Wenn mehrere Aspekte eines Ereignisses verstanden werden muessen
+- Wenn reichhaltiges Material zur Unterstuetzung von Berichtskapiteln benoetigt wird
+
+\u3010Rueckgabeinhalte\u3011
+- Relevante Fakten im Originaltext (direkt zitierbar)
+- Kernentitaets-Erkenntnisse
+- Beziehungskettenanalyse""",
+
+ 'tool_desc_panorama_search': """\
+\u3010Breitensuche - Gesamtueberblick erhalten\u3011
+Dieses Werkzeug dient dazu, ein vollstaendiges Gesamtbild der Simulationsergebnisse zu erhalten, besonders geeignet um Ereignisentwicklungen zu verstehen. Es wird:
+1. Alle relevanten Knoten und Beziehungen abrufen
+2. Zwischen aktuell gueltigen Fakten und historischen/abgelaufenen Fakten unterscheiden
+3. Ihnen helfen zu verstehen, wie sich die Meinungslage entwickelt hat
+
+\u3010Einsatzszenarien\u3011
+- Wenn der vollstaendige Entwicklungsverlauf eines Ereignisses verstanden werden muss
+- Wenn Meinungsaenderungen in verschiedenen Phasen verglichen werden muessen
+- Wenn umfassende Entitaets- und Beziehungsinformationen benoetigt werden
+
+\u3010Rueckgabeinhalte\u3011
+- Aktuell gueltige Fakten (neueste Simulationsergebnisse)
+- Historische/abgelaufene Fakten (Entwicklungsprotokoll)
+- Alle beteiligten Entitaeten""",
+
+ 'tool_desc_quick_search': """\
+\u3010Einfache Suche - Schnellrecherche\u3011
+Leichtgewichtiges Schnellrecherche-Werkzeug, geeignet fuer einfache, direkte Informationsabfragen.
+
+\u3010Einsatzszenarien\u3011
+- Wenn eine bestimmte Information schnell gefunden werden muss
+- Wenn ein Fakt verifiziert werden muss
+- Einfache Informationsrecherche
+
+\u3010Rueckgabeinhalte\u3011
+- Liste der zur Abfrage relevantesten Fakten""",
+
+ 'tool_desc_interview_agents': """\
+\u3010Tiefeninterview - Echte Agent-Befragung (Dual-Plattform)\u3011
+Ruft die Interview-API der OASIS-Simulationsumgebung auf, um laufende Simulations-Agents real zu befragen!
+Dies ist keine LLM-Simulation, sondern ein Aufruf der echten Interview-Schnittstelle fuer originale Antworten der Simulations-Agents.
+Standardmaessig werden Interviews auf beiden Plattformen Twitter und Reddit gleichzeitig gefuehrt, um umfassendere Standpunkte zu erhalten.
+
+Funktionsablauf:
+1. Automatisches Lesen der Persona-Datei, um alle Simulations-Agents kennenzulernen
+2. Intelligente Auswahl der zum Interviewthema relevantesten Agents (z.B. Studenten, Medien, Behoerden usw.)
+3. Automatische Generierung von Interviewfragen
+4. Aufruf der /api/simulation/interview/batch-Schnittstelle fuer echte Interviews auf beiden Plattformen
+5. Integration aller Interviewergebnisse mit Mehrperspektiven-Analyse
+
+\u3010Einsatzszenarien\u3011
+- Wenn Ereignismeinungen aus verschiedenen Rollenperspektiven verstanden werden muessen (Wie sehen Studenten das? Wie die Medien? Was sagen die Behoerden?)
+- Wenn Meinungen und Standpunkte mehrerer Parteien gesammelt werden muessen
+- Wenn echte Antworten von Simulations-Agents benoetigt werden (aus der OASIS-Simulationsumgebung)
+- Wenn der Bericht lebendiger sein soll und "Interviewprotokolle" enthalten soll
+
+\u3010Rueckgabeinhalte\u3011
+- Identitaetsinformationen der befragten Agents
+- Interviewantworten der einzelnen Agents auf beiden Plattformen Twitter und Reddit
+- Schluesselzitate (direkt zitierbar)
+- Interviewzusammenfassung und Standpunktvergleich
+
+\u3010Wichtig\u3011Die OASIS-Simulationsumgebung muss aktiv sein, um diese Funktion nutzen zu koennen!""",
+
+ # ── report_agent.py: ReACT loop messages ──
+
+ 'react_observation': """\
+Observation (Suchergebnisse):
+
+\u2550\u2550\u2550 Werkzeug {tool_name} Ergebnis \u2550\u2550\u2550
+{result}
+
+\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550
+Werkzeuge aufgerufen: {tool_calls_count}/{max_tool_calls} (Verwendet: {used_tools_str}){unused_hint}
+- Bei ausreichenden Informationen: Kapitelinhalt mit "Final Answer:" beginnen (obige Originaltexte muessen zitiert werden)
+- Bei Bedarf an mehr Informationen: Ein Werkzeug aufrufen und weiter recherchieren
+\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550""",
+
+ 'react_insufficient_tools': (
+ "\u3010Achtung\u3011Sie haben nur {tool_calls_count} Werkzeuge aufgerufen, mindestens {min_tool_calls} sind erforderlich. "
+ "Bitte rufen Sie weitere Werkzeuge auf, um mehr Simulationsdaten zu erhalten, bevor Sie Final Answer ausgeben. {unused_hint}"
+ ),
+
+ 'react_insufficient_tools_alt': (
+ "Bisher wurden nur {tool_calls_count} Werkzeuge aufgerufen, mindestens {min_tool_calls} sind erforderlich. "
+ "Bitte rufen Sie Werkzeuge auf, um Simulationsdaten zu erhalten. {unused_hint}"
+ ),
+
+ 'react_tool_limit': (
+ "Werkzeugaufruf-Limit erreicht ({tool_calls_count}/{max_tool_calls}), keine weiteren Werkzeugaufrufe moeglich. "
+ 'Bitte geben Sie sofort basierend auf den bereits erhaltenen Informationen den Kapitelinhalt mit "Final Answer:" am Anfang aus.'
+ ),
+
+ 'react_unused_tools_hint': "\n\U0001f4a1 Noch nicht verwendet: {unused_list}, Empfehlung: verschiedene Werkzeuge ausprobieren fuer Informationen aus mehreren Perspektiven",
+
+ 'react_force_final': "Werkzeugaufruf-Limit erreicht, bitte geben Sie direkt Final Answer: aus und erstellen Sie den Kapitelinhalt.",
+
+ # ── report_agent.py: Chat prompt ──
+
+ 'chat_system': """\
+Sie sind ein praeziser und effizienter Simulationsvorhersage-Assistent.
+
+\u3010Hintergrund\u3011
+Vorhersagebedingungen: {simulation_requirement}
+
+\u3010Bereits erstellter Analysebericht\u3011
+{report_content}
+
+\u3010Regeln\u3011
+1. Fragen bevorzugt basierend auf dem obigen Berichtsinhalt beantworten
+2. Fragen direkt beantworten, ausfuehrliche Denkausfuehrungen vermeiden
+3. Werkzeuge nur aufrufen, wenn der Berichtsinhalt zur Beantwortung nicht ausreicht
+4. Antworten sollen praegnant, klar und strukturiert sein
+
+\u3010Verfuegbare Werkzeuge\u3011(nur bei Bedarf verwenden, maximal 1-2 Aufrufe)
+{tools_description}
+
+\u3010Werkzeugaufruf-Format\u3011
+
+{{"name": "Werkzeugname", "parameters": {{"Parametername": "Parameterwert"}}}}
+
+
+\u3010Antwortstil\u3011
+- Praegnant und direkt, keine langen Abhandlungen
+- > Format fuer Zitate wichtiger Inhalte verwenden
+- Zuerst die Schlussfolgerung geben, dann die Begruendung erlaeutern""",
+
+ 'chat_observation_suffix': "\n\nBitte beantworten Sie die Frage praegnant.",
+
+ # ── zep_tools.py: InsightForge sub-query generation ──
+
+ 'insight_forge_sub_query_system': """Sie sind ein professioneller Experte f\u00fcr Fragenanalyse. Ihre Aufgabe ist es, eine komplexe Frage in mehrere Teilfragen zu zerlegen, die in der simulierten Welt unabh\u00e4ngig beobachtet werden k\u00f6nnen.
+
+Anforderungen:
+1. Jede Teilfrage sollte spezifisch genug sein, um in der simulierten Welt relevante Agent-Verhaltensweisen oder Ereignisse zu finden
+2. Die Teilfragen sollten verschiedene Dimensionen der urspr\u00fcnglichen Frage abdecken (z.B.: Wer, Was, Warum, Wie, Wann, Wo)
+3. Die Teilfragen sollten mit dem Simulationsszenario zusammenh\u00e4ngen
+4. R\u00fcckgabe im JSON-Format: {"sub_queries": ["Teilfrage1", "Teilfrage2", ...]}""",
+
+ 'insight_forge_sub_query_user': """Hintergrund der Simulationsanforderung:
+{simulation_requirement}
+
+{report_context_line}
+
+Bitte zerlegen Sie die folgende Frage in {max_queries} Teilfragen:
+{query}
+
+Geben Sie die Teilfragenliste im JSON-Format zur\u00fcck.""",
+
+ # ── zep_tools.py: Interview agent selection ──
+
+ 'interview_select_system': """Sie sind ein professioneller Interview-Planungsexperte. Ihre Aufgabe ist es, basierend auf den Interview-Anforderungen die am besten geeigneten Interviewpartner aus der Liste der simulierten Agents auszuw\u00e4hlen.
+
+Auswahlkriterien:
+1. Die Identit\u00e4t/der Beruf des Agents ist relevant f\u00fcr das Interviewthema
+2. Der Agent k\u00f6nnte einzigartige oder wertvolle Standpunkte vertreten
+3. W\u00e4hlen Sie vielf\u00e4ltige Perspektiven (z.B.: Bef\u00fcrworter, Gegner, Neutrale, Fachleute etc.)
+4. Bevorzugen Sie Rollen, die direkt mit dem Ereignis zusammenh\u00e4ngen
+
+R\u00fcckgabe im JSON-Format:
+{
+ "selected_indices": [Indexliste der ausgew\u00e4hlten Agents],
+ "reasoning": "Erkl\u00e4rung der Auswahlgr\u00fcnde"
+}""",
+
+ 'interview_select_user': """Interview-Anforderung:
+{interview_requirement}
+
+Simulationshintergrund:
+{simulation_requirement}
+
+Verf\u00fcgbare Agent-Liste (insgesamt {agent_count}):
+{agent_summaries_json}
+
+Bitte w\u00e4hlen Sie maximal {max_agents} der am besten geeigneten Agents f\u00fcr das Interview aus und erkl\u00e4ren Sie die Auswahlgr\u00fcnde.""",
+
+ # ── zep_tools.py: Interview question generation ──
+
+ 'interview_questions_system': """Sie sind ein professioneller Journalist/Interviewer. Generieren Sie basierend auf den Interview-Anforderungen 3-5 tiefgehende Interviewfragen.
+
+Anforderungen an die Fragen:
+1. Offene Fragen, die zu ausf\u00fchrlichen Antworten ermutigen
+2. Fragen, die je nach Rolle unterschiedliche Antworten hervorrufen k\u00f6nnen
+3. Abdeckung mehrerer Dimensionen wie Fakten, Meinungen, Gef\u00fchle etc.
+4. Nat\u00fcrliche Sprache, wie in einem echten Interview
+5. Jede Frage sollte maximal 50 W\u00f6rter umfassen, kurz und pr\u00e4gnant
+6. Direkte Fragestellung, ohne Hintergrundbeschreibung oder Pr\u00e4fix
+
+R\u00fcckgabe im JSON-Format: {"questions": ["Frage1", "Frage2", ...]}""",
+
+ 'interview_questions_user': """Interview-Anforderung: {interview_requirement}
+
+Simulationshintergrund: {simulation_requirement}
+
+Rollen der Interviewpartner: {agent_roles}
+
+Bitte generieren Sie 3-5 Interviewfragen.""",
+
+ # ── zep_tools.py: Interview summary generation ──
+
+ 'interview_summary_system': """Sie sind ein professioneller Nachrichtenredakteur. Bitte erstellen Sie basierend auf den Antworten mehrerer Befragter eine Interviewzusammenfassung.
+
+Anforderungen an die Zusammenfassung:
+1. Destillieren Sie die Hauptstandpunkte aller Parteien
+2. Weisen Sie auf Konsens und Meinungsverschiedenheiten hin
+3. Heben Sie wertvolle Zitate hervor
+4. Bleiben Sie objektiv und neutral, bevorzugen Sie keine Seite
+5. Maximal 1000 W\u00f6rter
+
+Formatvorgaben (m\u00fcssen eingehalten werden):
+- Verwenden Sie Klartext-Abs\u00e4tze, trennen Sie verschiedene Abschnitte mit Leerzeilen
+- Verwenden Sie keine Markdown-\u00dcberschriften (wie #, ##, ###)
+- Verwenden Sie keine Trennlinien (wie ---, ***)
+- Verwenden Sie beim Zitieren von Befragten deutsche Anf\u00fchrungszeichen \u201e\u201c
+- Sie k\u00f6nnen **Fettdruck** f\u00fcr Schl\u00fcsselw\u00f6rter verwenden, aber keine andere Markdown-Syntax""",
+
+ 'interview_summary_user': """Interviewthema: {interview_requirement}
+
+Interviewinhalte:
+{interview_texts}
+
+Bitte erstellen Sie eine Interviewzusammenfassung.""",
+
+ # ── zep_tools.py: Interview prompt prefix (sent to OASIS agents) ──
+
+ 'interview_prompt_prefix': (
+ "Sie werden gerade interviewt. Bitte beantworten Sie die folgenden Fragen "
+ "basierend auf Ihrer Pers\u00f6nlichkeit, allen bisherigen Erinnerungen und Handlungen "
+ "direkt als Klartext.\n"
+ "Antwortanforderungen:\n"
+ "1. Antworten Sie direkt in nat\u00fcrlicher Sprache, rufen Sie keine Werkzeuge auf\n"
+ "2. Bleiben Sie in Ihrer Rolle, antworten Sie aus Ihrer Perspektive\n"
+ "3. Antworten Sie ausf\u00fchrlich und detailliert, mindestens 3-5 S\u00e4tze pro Frage\n"
+ "4. Beantworten Sie jede Frage der Reihe nach, beginnen Sie jede Antwort mit \u201eFrage X:\u201c (X = Fragenummer)\n"
+ "5. Wenn Sie sich bei einer Frage unsicher sind, antworten Sie basierend auf Ihrer Pers\u00f6nlichkeit und Ihren Erfahrungen\n\n"
+ ),
+
+ # ── simulation.py: Interview prompt prefix (API endpoint) ──
+
+ 'simulation_interview_prompt_prefix': "Basierend auf Ihrer Pers\u00f6nlichkeit, allen bisherigen Erinnerungen und Handlungen, antworten Sie direkt als Text ohne Werkzeugaufrufe:",
+
+ # ── ontology_generator.py ──
+
+ 'ontology_system': """Sie sind ein professioneller Experte fuer Wissensgraph-Ontologie-Design. Ihre Aufgabe ist es, den gegebenen Textinhalt und die Simulationsanforderungen zu analysieren und geeignete Entitaets- und Beziehungstypen fuer eine **Social-Media-Meinungssimulation** zu entwerfen.
+
+**Wichtig: Sie muessen gueltige JSON-Daten ausgeben und keinen anderen Inhalt.**
+
+## Kernaufgabe - Hintergrund
+
+Wir bauen ein **Social-Media-Meinungssimulationssystem** auf. In diesem System:
+- Jede Entitaet ist ein "Konto" oder "Akteur", der in sozialen Medien posten, interagieren und Informationen verbreiten kann
+- Entitaeten beeinflussen sich gegenseitig, teilen, kommentieren und reagieren aufeinander
+- Wir muessen die Reaktionen verschiedener Parteien bei Meinungsereignissen und die Wege der Informationsverbreitung simulieren
+
+Daher **muessen Entitaeten real existierende Akteure sein, die in sozialen Medien posten und interagieren koennen**:
+
+**Erlaubt**:
+- Konkrete Einzelpersonen (oeffentliche Persoenlichkeiten, Beteiligte, Meinungsfuehrer, Experten, normale Buerger)
+- Unternehmen und Firmen (einschliesslich ihrer offiziellen Konten)
+- Organisationen (Universitaeten, Verbaende, NGOs, Gewerkschaften usw.)
+- Regierungsbehoerden, Aufsichtsbehoerden
+- Medienorganisationen (Zeitungen, Fernsehsender, unabhaengige Medien, Webseiten)
+- Social-Media-Plattformen selbst
+- Vertreter bestimmter Gruppen (z.B. Alumni-Vereine, Fangruppen, Interessengruppen usw.)
+
+**Nicht erlaubt**:
+- Abstrakte Konzepte (z.B. "oeffentliche Meinung", "Stimmung", "Trend")
+- Themen/Diskussionsgegenstaende (z.B. "akademische Integritaet", "Bildungsreform")
+- Standpunkte/Haltungen (z.B. "Befuerworter", "Gegner")
+
+## Ausgabeformat
+
+Bitte geben Sie JSON im folgenden Format aus:
+
+```json
+{
+ "entity_types": [
+ {
+ "name": "Entitaetstypname (Englisch, PascalCase)",
+ "description": "Kurzbeschreibung (Englisch, max. 100 Zeichen)",
+ "attributes": [
+ {
+ "name": "Attributname (Englisch, snake_case)",
+ "type": "text",
+ "description": "Attributbeschreibung"
+ }
+ ],
+ "examples": ["Beispielentitaet1", "Beispielentitaet2"]
+ }
+ ],
+ "edge_types": [
+ {
+ "name": "Beziehungstypname (Englisch, UPPER_SNAKE_CASE)",
+ "description": "Kurzbeschreibung (Englisch, max. 100 Zeichen)",
+ "source_targets": [
+ {"source": "Quellentitaetstyp", "target": "Zielentitaetstyp"}
+ ],
+ "attributes": []
+ }
+ ],
+ "analysis_summary": "Kurze Analysebeschreibung des Textinhalts (auf Deutsch)"
+}
+```
+
+## Designrichtlinien (aeusserst wichtig!)
+
+### 1. Entitaetstyp-Design - Muss strikt eingehalten werden
+
+**Mengenanforderung: Es muessen genau 10 Entitaetstypen sein**
+
+**Hierarchieanforderung (muss sowohl spezifische als auch Auffangtypen enthalten)**:
+
+Ihre 10 Entitaetstypen muessen folgende Hierarchie aufweisen:
+
+A. **Auffangtypen (muessen enthalten sein, als letzte 2 in der Liste)**:
+ - `Person`: Auffangtyp fuer jede natuerliche Person. Wenn eine Person keinem spezifischeren Personentyp zugeordnet werden kann, wird sie hier eingeordnet.
+ - `Organization`: Auffangtyp fuer jede Organisation. Wenn eine Organisation keinem spezifischeren Organisationstyp zugeordnet werden kann, wird sie hier eingeordnet.
+
+B. **Spezifische Typen (8, basierend auf dem Textinhalt entworfen)**:
+ - Entwerfen Sie spezifischere Typen fuer die Hauptakteure im Text
+ - Beispiel: Bei akademischen Ereignissen koennen `Student`, `Professor`, `University` verwendet werden
+ - Beispiel: Bei wirtschaftlichen Ereignissen koennen `Company`, `CEO`, `Employee` verwendet werden
+
+**Warum Auffangtypen benoetigt werden**:
+- Im Text tauchen verschiedene Personen auf, wie "Grundschullehrer", "Passanten", "anonyme Internetnutzer"
+- Wenn kein spezifischer Typ passt, sollten sie unter `Person` eingeordnet werden
+- Ebenso sollten kleine Organisationen, temporaere Gruppen usw. unter `Organization` eingeordnet werden
+
+**Designprinzipien fuer spezifische Typen**:
+- Identifizieren Sie haeufig auftretende oder wichtige Akteurtypen im Text
+- Jeder spezifische Typ sollte klare Grenzen haben, Ueberschneidungen vermeiden
+- Die description muss den Unterschied zwischen diesem Typ und dem Auffangtyp klar beschreiben
+
+### 2. Beziehungstyp-Design
+
+- Anzahl: 6-10
+- Beziehungen sollten reale Verbindungen in Social-Media-Interaktionen widerspiegeln
+- Stellen Sie sicher, dass die source_targets der Beziehungen Ihre definierten Entitaetstypen abdecken
+
+### 3. Attribut-Design
+
+- 1-3 Schluesselattribute pro Entitaetstyp
+- **Achtung**: Attributnamen duerfen nicht `name`, `uuid`, `group_id`, `created_at`, `summary` verwenden (diese sind Systemreservierungen)
+- Empfohlen: `full_name`, `title`, `role`, `position`, `location`, `description` usw.
+
+## Entitaetstyp-Referenz
+
+**Personentypen (spezifisch)**:
+- Student: Student
+- Professor: Professor/Wissenschaftler
+- Journalist: Journalist
+- Celebrity: Prominenter/Influencer
+- Executive: Fuehrungskraft
+- Official: Regierungsbeamter
+- Lawyer: Rechtsanwalt
+- Doctor: Arzt
+
+**Personentypen (Auffang)**:
+- Person: Jede natuerliche Person (wird verwendet, wenn kein spezifischerer Typ passt)
+
+**Organisationstypen (spezifisch)**:
+- University: Hochschule
+- Company: Unternehmen
+- GovernmentAgency: Regierungsbehoerde
+- MediaOutlet: Medienorganisation
+- Hospital: Krankenhaus
+- School: Schule
+- NGO: Nichtregierungsorganisation
+
+**Organisationstypen (Auffang)**:
+- Organization: Jede Organisation (wird verwendet, wenn kein spezifischerer Typ passt)
+
+## Beziehungstyp-Referenz
+
+- WORKS_FOR: Arbeitet bei
+- STUDIES_AT: Studiert an
+- AFFILIATED_WITH: Gehoert zu
+- REPRESENTS: Vertritt
+- REGULATES: Beaufsichtigt
+- REPORTS_ON: Berichtet ueber
+- COMMENTS_ON: Kommentiert
+- RESPONDS_TO: Reagiert auf
+- SUPPORTS: Unterstuetzt
+- OPPOSES: Widerspricht
+- COLLABORATES_WITH: Kooperiert mit
+- COMPETES_WITH: Konkurriert mit""",
+
+ 'ontology_user_suffix': """
+Bitte entwerfen Sie auf Basis des obigen Inhalts geeignete Entitaets- und Beziehungstypen fuer eine Meinungssimulation.
+
+**Verbindliche Regeln**:
+1. Es muessen genau 10 Entitaetstypen ausgegeben werden
+2. Die letzten 2 muessen Auffangtypen sein: Person (Personen-Auffang) und Organization (Organisations-Auffang)
+3. Die ersten 8 sind spezifische Typen, die auf dem Textinhalt basieren
+4. Alle Entitaetstypen muessen real existierende Akteure sein, keine abstrakten Konzepte
+5. Attributnamen duerfen keine Reservierungen wie name, uuid, group_id verwenden, stattdessen full_name, org_name usw.""",
+
+ # ── simulation_config_generator.py ──
+
+ 'sim_config_time_system': "Sie sind ein Experte fuer Social-Media-Simulation. Reine JSON-Ausgabe, die Zeitkonfiguration muss passend zu deutschen Tagesablaeufen sein.",
+
+ 'sim_config_event_system': "Sie sind ein Experte fuer Meinungsanalyse. Reine JSON-Ausgabe. Beachten Sie, dass poster_type exakt mit den verfuegbaren Entitaetstypen uebereinstimmen muss.",
+
+ 'sim_config_agent_system': "Sie sind ein Experte fuer Social-Media-Verhaltensanalyse. Reine JSON-Ausgabe, die Konfiguration muss passend zu deutschen Tagesablaeufen sein.",
+
+ # ── oasis_profile_generator.py ──
+
+ 'profile_system': "Sie sind ein Experte fuer die Erstellung von Social-Media-Nutzerprofilen. Generieren Sie detaillierte, realistische Personenbeschreibungen fuer Meinungssimulationen, die die reale Situation bestmoeglich abbilden. Es muss gueltiges JSON-Format zurueckgegeben werden, alle Zeichenkettenwerte duerfen keine unescapten Zeilenumbrueche enthalten. Verwenden Sie Deutsch.",
+
+ 'profile_individual_user': """Generieren Sie eine detaillierte Social-Media-Nutzerpersona fuer die Entitaet, die die reale Situation bestmoeglich abbildet.
+
+Entitaetsname: {entity_name}
+Entitaetstyp: {entity_type}
+Entitaetszusammenfassung: {entity_summary}
+Entitaetsattribute: {attrs_str}
+
+Kontextinformationen:
+{context_str}
+
+Bitte generieren Sie JSON mit folgenden Feldern:
+
+1. bio: Social-Media-Kurzbiografie, 200 Zeichen
+2. persona: Detaillierte Personenbeschreibung (2000 Zeichen Klartext), muss enthalten:
+ - Basisinformationen (Alter, Beruf, Bildungshintergrund, Wohnort)
+ - Personenhintergrund (wichtige Erfahrungen, Verbindung zum Ereignis, soziale Beziehungen)
+ - Persoenlichkeitsmerkmale (MBTI-Typ, Kernpersoenlichkeit, Art des emotionalen Ausdrucks)
+ - Social-Media-Verhalten (Beitragshaeufigkeit, Inhaltspraeferenzen, Interaktionsstil, Sprachmerkmale)
+ - Standpunkte (Haltung zu Themen, Inhalte die provozieren/beruehren koennten)
+ - Einzigartige Merkmale (Redewendungen, besondere Erfahrungen, persoenliche Hobbys)
+ - Persoenliche Erinnerungen (wichtiger Teil der Persona, die Verbindung dieser Person zum Ereignis sowie deren bisherige Aktionen und Reaktionen im Ereignis beschreiben)
+3. age: Alter als Zahl (muss eine Ganzzahl sein)
+4. gender: Geschlecht, muss auf Englisch sein: "male" oder "female"
+5. mbti: MBTI-Typ (z.B. INTJ, ENFP usw.)
+6. country: Land (auf Deutsch, z.B. "Deutschland")
+7. profession: Beruf
+8. interested_topics: Array von Interessenthemen
+
+Wichtig:
+- Alle Feldwerte muessen Zeichenketten oder Zahlen sein, keine Zeilenumbrueche verwenden
+- persona muss eine zusammenhaengende Textbeschreibung sein
+- Verwenden Sie Deutsch (ausser beim gender-Feld, das muss auf Englisch male/female sein)
+- Inhalt muss mit den Entitaetsinformationen uebereinstimmen
+- age muss eine gueltige Ganzzahl sein, gender muss "male" oder "female" sein""",
+
+ 'profile_group_user': """Generieren Sie eine detaillierte Social-Media-Kontoeinstellung fuer eine Institutions-/Gruppenentitaet, die die reale Situation bestmoeglich abbildet.
+
+Entitaetsname: {entity_name}
+Entitaetstyp: {entity_type}
+Entitaetszusammenfassung: {entity_summary}
+Entitaetsattribute: {attrs_str}
+
+Kontextinformationen:
+{context_str}
+
+Bitte generieren Sie JSON mit folgenden Feldern:
+
+1. bio: Offizielle Konto-Kurzbiografie, 200 Zeichen, professionell und angemessen
+2. persona: Detaillierte Kontobeschreibung (2000 Zeichen Klartext), muss enthalten:
+ - Grundinformationen der Institution (offizieller Name, Art der Institution, Gruendungshintergrund, Hauptfunktionen)
+ - Kontopositionierung (Kontotyp, Zielgruppe, Kernfunktion)
+ - Kommunikationsstil (Sprachmerkmale, haeufig verwendete Ausdruecke, Tabuthemen)
+ - Veroeffentlichungsmerkmale (Inhaltstypen, Veroeffentlichungshaeufigkeit, aktive Zeitraeume)
+ - Standpunkt und Haltung (offizielle Position zu Kernthemen, Umgang mit Kontroversen)
+ - Besondere Hinweise (vertretenes Gruppenprofil, Betriebsgewohnheiten)
+ - Institutionelle Erinnerungen (wichtiger Teil der Institutions-Persona, die Verbindung dieser Institution zum Ereignis sowie deren bisherige Aktionen und Reaktionen im Ereignis beschreiben)
+3. age: Fest auf 30 (virtuelles Alter des Institutionskontos)
+4. gender: Fest auf "other" (Institutionskonten verwenden other fuer nicht-persoenlich)
+5. mbti: MBTI-Typ, zur Beschreibung des Kontostils, z.B. ISTJ fuer streng konservativ
+6. country: Land (auf Deutsch, z.B. "Deutschland")
+7. profession: Beschreibung der Institutionsfunktion
+8. interested_topics: Array der Interessenbereiche
+
+Wichtig:
+- Alle Feldwerte muessen Zeichenketten oder Zahlen sein, keine null-Werte erlaubt
+- persona muss eine zusammenhaengende Textbeschreibung sein, keine Zeilenumbrueche verwenden
+- Verwenden Sie Deutsch (ausser beim gender-Feld, das muss auf Englisch "other" sein)
+- age muss die Ganzzahl 30 sein, gender muss die Zeichenkette "other" sein
+- Institutionskonten muessen ihrer Identitaet und Positionierung entsprechend kommunizieren""",
+}
+
+
+# ═══════════════════════════════════════════════════════════════
+# FORMATS - Short format strings with placeholders
+# ═══════════════════════════════════════════════════════════════
+
+FORMATS = {
+    # ── zep_tools.py: SearchResult.to_text() — 'search_query'/'search_results_found' are re-parsed by PATTERNS below; keep wording in sync ──
+    'search_query': 'Suchanfrage: {query}',
+    'search_results_found': '{count} relevante Ergebnisse gefunden',
+    'search_relevant_facts_header': '\n### Relevante Fakten:',
+
+    # ── zep_tools.py: NodeInfo.to_text() ──
+    'node_info': 'Entit\u00e4t: {name} (Typ: {entity_type})\nZusammenfassung: {summary}',
+    'node_unknown_type': 'Unbekannter Typ',
+
+    # ── zep_tools.py: EdgeInfo.to_text() ──
+    'edge_info': 'Beziehung: {source} --[{name}]--> {target}\nFakt: {fact}',
+    'edge_validity': '\nG\u00fcltigkeit: {valid_at} - {invalid_at}',
+    'edge_expired': ' (Abgelaufen: {expired_at})',
+    'edge_valid_at_unknown': 'Unbekannt',
+    'edge_invalid_at_default': 'Bis heute',
+
+    # ── zep_tools.py: InsightForgeResult.to_text() — 'insight_query'/'insight_scenario' are re-parsed by PATTERNS below; keep wording in sync ──
+    'insight_header': '## Tiefenanalyse der Zukunftsvorhersage',
+    'insight_query': 'Analysefrage: {query}',
+    'insight_scenario': 'Vorhersageszenario: {simulation_requirement}',
+    'insight_stats_header': '\n### Vorhersagedatenstatistik',
+    'insight_stats_facts': '- Relevante Vorhersagefakten: {count}',
+    'insight_stats_entities': '- Beteiligte Entit\u00e4ten: {count}',
+    'insight_stats_relations': '- Beziehungsketten: {count}',
+    'insight_sub_queries_header': '\n### Analysierte Teilfragen',
+    'insight_key_facts_header': '\n### \u3010Schl\u00fcsselfakten\u3011(Bitte diese Originaltexte im Bericht zitieren)',
+    'insight_core_entities_header': '\n### \u3010Kernentit\u00e4ten\u3011',
+    'insight_entity_line': '- **{name}** ({entity_type})',
+    'insight_entity_summary': '  Zusammenfassung: "{summary}"',
+    'insight_entity_facts_count': '  Relevante Fakten: {count}',
+    'insight_relationship_chains_header': '\n### \u3010Beziehungsketten\u3011',
+
+    # ── zep_tools.py: PanoramaResult.to_text() — 'panorama_query' is re-parsed by PATTERNS below; keep wording in sync ──
+    'panorama_header': '## Breitensuchergebnisse (Zukunfts-Panorama)',
+    'panorama_query': 'Abfrage: {query}',
+    'panorama_stats_header': '\n### Statistikinformationen',
+    'panorama_stats_nodes': '- Gesamtknoten: {count}',
+    'panorama_stats_edges': '- Gesamtkanten: {count}',
+    'panorama_stats_active': '- Aktuell g\u00fcltige Fakten: {count}',
+    'panorama_stats_historical': '- Historische/abgelaufene Fakten: {count}',
+    'panorama_active_header': '\n### \u3010Aktuell g\u00fcltige Fakten\u3011(Originale Simulationsergebnisse)',
+    'panorama_historical_header': '\n### \u3010Historische/abgelaufene Fakten\u3011(Aufzeichnung des Entwicklungsverlaufs)',
+    'panorama_entities_header': '\n### \u3010Beteiligte Entit\u00e4ten\u3011',
+    'panorama_entity_line': '- **{name}** ({entity_type})',
+    'panorama_entity_type_default': 'Entit\u00e4t',
+
+    # ── zep_tools.py: AgentInterview.to_text() ──
+    'interview_agent_profile': '_Kurzprofil: {bio}_',
+    'interview_key_quotes_header': '\n**Schl\u00fcsselzitate:**\n',
+
+    # ── zep_tools.py: InterviewResult.to_text() ──
+    'interview_result_header': '## Tiefeninterview-Bericht',
+    'interview_topic': '**Interviewthema:** {topic}',
+    'interview_respondent_count': '**Anzahl Befragte:** {interviewed} / {total} simulierte Agenten',
+    'interview_selection_reasoning_header': '\n### Begr\u00fcndung der Interviewpartner-Auswahl',
+    'interview_record_header': '\n### Interviewprotokoll',
+    'interview_entry_header': '\n#### Interview #{index}: {name}',
+    'interview_no_record': '(Kein Interviewprotokoll)\n\n---',
+    'interview_summary_header': '\n### Interviewzusammenfassung und Kernaussagen',
+
+    # ── zep_tools.py: Dual platform markers — literals must match PATTERNS interview_twitter_marker/interview_reddit_marker exactly ──
+    'interview_twitter_response': '\u3010Twitter-Plattform-Antwort\u3011\n{text}',
+    'interview_reddit_response': '\u3010Reddit-Plattform-Antwort\u3011\n{text}',
+    'interview_no_platform_response': '(Keine Antwort von dieser Plattform)',
+
+    # ── zep_graph_memory_updater.py: Activity descriptions. NOTE(review): CJK corner brackets \u300c\u300d kept from the Chinese source as content delimiters — presumably intentional; confirm before localizing to \u201e\u201c ──
+    'activity_create_post': 'hat einen Beitrag veroeffentlicht: \u300c{content}\u300d',
+    'activity_create_post_empty': 'hat einen Beitrag veroeffentlicht',
+    'activity_like_post': 'hat den Beitrag von {author} geliked: \u300c{content}\u300d',
+    'activity_like_post_content_only': 'hat einen Beitrag geliked: \u300c{content}\u300d',
+    'activity_like_post_author_only': 'hat einen Beitrag von {author} geliked',
+    'activity_like_post_empty': 'hat einen Beitrag geliked',
+    'activity_dislike_post': 'hat den Beitrag von {author} gedisliked: \u300c{content}\u300d',
+    'activity_dislike_post_content_only': 'hat einen Beitrag gedisliked: \u300c{content}\u300d',
+    'activity_dislike_post_author_only': 'hat einen Beitrag von {author} gedisliked',
+    'activity_dislike_post_empty': 'hat einen Beitrag gedisliked',
+    'activity_repost': 'hat den Beitrag von {author} weitergeleitet: \u300c{content}\u300d',
+    'activity_repost_content_only': 'hat einen Beitrag weitergeleitet: \u300c{content}\u300d',
+    'activity_repost_author_only': 'hat einen Beitrag von {author} weitergeleitet',
+    'activity_repost_empty': 'hat einen Beitrag weitergeleitet',
+    'activity_quote_post': 'hat den Beitrag von {author} zitiert \u300c{content}\u300d',
+    'activity_quote_post_content_only': 'hat einen Beitrag zitiert \u300c{content}\u300d',
+    'activity_quote_post_author_only': 'hat einen Beitrag von {author} zitiert',
+    'activity_quote_post_empty': 'hat einen Beitrag zitiert',
+    'activity_quote_comment': ', und kommentierte: \u300c{content}\u300d',
+    'activity_follow': 'folgt dem Benutzer \u300c{name}\u300d',
+    'activity_follow_empty': 'folgt einem Benutzer',
+    'activity_create_comment': 'hat unter dem Beitrag von {author} \u300c{post_content}\u300d kommentiert: \u300c{content}\u300d',
+    'activity_create_comment_post_only': 'hat unter dem Beitrag \u300c{post_content}\u300d kommentiert: \u300c{content}\u300d',
+    'activity_create_comment_author_only': 'hat unter dem Beitrag von {author} kommentiert: \u300c{content}\u300d',
+    'activity_create_comment_content_only': 'hat kommentiert: \u300c{content}\u300d',
+    'activity_create_comment_empty': 'hat einen Kommentar veroeffentlicht',
+    'activity_like_comment': 'hat den Kommentar von {author} geliked: \u300c{content}\u300d',
+    'activity_like_comment_content_only': 'hat einen Kommentar geliked: \u300c{content}\u300d',
+    'activity_like_comment_author_only': 'hat einen Kommentar von {author} geliked',
+    'activity_like_comment_empty': 'hat einen Kommentar geliked',
+    'activity_dislike_comment': 'hat den Kommentar von {author} gedisliked: \u300c{content}\u300d',
+    'activity_dislike_comment_content_only': 'hat einen Kommentar gedisliked: \u300c{content}\u300d',
+    'activity_dislike_comment_author_only': 'hat einen Kommentar von {author} gedisliked',
+    'activity_dislike_comment_empty': 'hat einen Kommentar gedisliked',
+    'activity_search': 'hat nach \u300c{query}\u300d gesucht',
+    'activity_search_empty': 'hat eine Suche durchgefuehrt',
+    'activity_search_user': 'hat nach Benutzer \u300c{query}\u300d gesucht',
+    'activity_search_user_empty': 'hat nach Benutzern gesucht',
+    'activity_mute': 'hat den Benutzer \u300c{name}\u300d stummgeschaltet',
+    'activity_mute_empty': 'hat einen Benutzer stummgeschaltet',
+    'activity_generic': 'hat die Aktion {action_type} ausgefuehrt',
+
+    # ── oasis_profile_generator.py: Zep search query ──
+    'zep_entity_search_query': 'Alle Informationen, Aktivitaeten, Ereignisse, Beziehungen und Hintergruende zu {entity_name}',
+
+    # ── simulation_config_generator.py: Progress messages ──
+    'sim_progress_time': 'Zeitkonfiguration wird generiert...',
+    'sim_progress_events': 'Ereigniskonfiguration und Trendthemen werden generiert...',
+    'sim_progress_agents': 'Agent-Konfiguration wird generiert ({start}-{end}/{total})...',
+    'sim_progress_platform': 'Plattformkonfiguration wird generiert...',
+}
+
+
+# ═══════════════════════════════════════════════════════════════
+# STRINGS - UI labels, status messages, defaults
+# ═══════════════════════════════════════════════════════════════
+
+STRINGS = {
+    # ── report_agent.py: Defaults ──
+    'report_default_title': 'Zukunftsvorhersage-Bericht',
+    'report_default_summary': 'Analyse zukuenftiger Trends und Risiken basierend auf Simulationsvorhersagen',
+    'report_fallback_section_1': 'Vorhersageszenarien und Kernerkenntnisse',
+    'report_fallback_section_2': 'Analyse des vorhergesagten Gruppenverhaltens',
+    'report_fallback_section_3': 'Trendausblick und Risikohinweise',
+    'report_first_chapter_marker': '(Dies ist das erste Kapitel)',
+
+    # ── report_agent.py: Log messages ──
+    'log_report_start': 'Berichtsgenerierungsaufgabe gestartet',
+    'log_planning_start': 'Planung der Berichtsgliederung beginnt',
+    'log_planning_context': 'Simulations-Kontextinformationen werden abgerufen',
+    'log_planning_complete': 'Gliederungsplanung abgeschlossen',
+    'log_section_start': 'Abschnittsgenerierung beginnt: {title}',
+    'log_react_thought': 'ReACT Runde {iteration} Denken',
+    'log_tool_call': 'Werkzeug aufgerufen: {tool_name}',
+    'log_tool_result': 'Werkzeug {tool_name} hat Ergebnis zurueckgegeben',
+    'log_llm_response': 'LLM-Antwort (Werkzeugaufruf: {has_tool_calls}, Endgueltige Antwort: {has_final_answer})',
+    'log_section_content': 'Abschnitt {title} Inhaltsgenerierung abgeschlossen',
+    'log_section_complete': 'Abschnitt {title} Generierung abgeschlossen',
+    'log_report_complete': 'Berichtsgenerierung abgeschlossen',
+    'log_error': 'Fehler aufgetreten: {error}',
+
+    # ── report_agent.py: Progress callbacks ──
+    'progress_analyzing': 'Simulationsanforderungen werden analysiert...',
+    'progress_outline': 'Berichtsgliederung wird erstellt...',
+    'progress_outline_parsing': 'Gliederungsstruktur wird analysiert...',
+    'progress_outline_complete': 'Gliederungsplanung abgeschlossen',
+    'progress_generating': 'Tiefenrecherche und Verfassen ({tool_calls_count}/{max_tool_calls})',
+
+    # ── report_agent.py: Tool parameter descriptions ──
+    'tool_param_insight_query': 'Die Frage oder das Thema, das Sie eingehend analysieren moechten',
+    'tool_param_insight_context': 'Kontext des aktuellen Berichtskapitels (optional, hilft bei der Generierung praeziserer Teilfragen)',
+    'tool_param_panorama_query': 'Suchabfrage, fuer Relevanzsortierung',
+    'tool_param_panorama_expired': 'Ob abgelaufene/historische Inhalte einbezogen werden sollen (Standard: True)',
+    'tool_param_quick_query': 'Suchabfrage-Zeichenkette',
+    'tool_param_quick_limit': 'Anzahl der zurueckgegebenen Ergebnisse (optional, Standard: 10)',
+    'tool_param_interview_topic': "Interviewthema oder Anforderungsbeschreibung (z.B.: 'Meinungen der Studenten zum Formaldehyd-Vorfall im Wohnheim erfahren')",
+    'tool_param_interview_max': 'Maximale Anzahl der zu befragenden Agents (optional, Standard: 5, Maximum: 10)',
+
+    # ── report_agent.py: Error messages ──
+    'error_unknown_tool': 'Unbekanntes Werkzeug: {tool_name}. Bitte eines der folgenden verwenden: insight_forge, panorama_search, quick_search',
+    'error_tool_execution': 'Werkzeugausfuehrung fehlgeschlagen: {error}',
+
+    # ── zep_tools.py: Fallback sub-queries ──
+    'fallback_sub_query_main': 'Hauptbeteiligte bei {query}',
+    'fallback_sub_query_cause': 'Ursachen und Auswirkungen von {query}',
+    'fallback_sub_query_progress': 'Entwicklungsverlauf von {query}',
+
+    # ── zep_tools.py: Interview fallback strings ──
+    'interview_no_profiles': 'Keine Agent-Profildateien f\u00fcr Interviews gefunden',
+    'interview_api_failed': 'Interview-API-Aufruf fehlgeschlagen: {error}. Bitte OASIS-Simulationsumgebung pr\u00fcfen.',
+    'interview_env_failed': 'Interview fehlgeschlagen: {error}. Simulationsumgebung m\u00f6glicherweise beendet, bitte sicherstellen, dass OASIS l\u00e4uft.',
+    'interview_exception': 'Fehler w\u00e4hrend des Interviews: {error}',
+    'interview_no_completed': 'Kein Interview abgeschlossen',
+    'interview_fallback_question': 'Was ist Ihre Meinung zu {topic}?',
+    'interview_fallback_question_2': 'Welche Auswirkungen hat dies auf Sie oder die Gruppe, die Sie vertreten?',
+    'interview_fallback_question_3': 'Wie sollte dieses Problem Ihrer Meinung nach gel\u00f6st oder verbessert werden?',
+    'interview_fallback_summary': 'Insgesamt {count} Befragte interviewt, darunter: {names}',
+    'interview_auto_selection': 'Automatische Auswahl basierend auf Relevanz',
+    'interview_default_selection': 'Standardauswahlstrategie verwendet',
+    'interview_auto_reasoning': '(Automatische Auswahl)',
+
+    # ── zep_tools.py: Misc ──
+    'profession_unknown': 'Unbekannt',
+
+    # ── zep_graph_memory_updater.py ──
+    'platform_twitter': 'Welt 1',
+    'platform_reddit': 'Welt 2',
+
+    # ── simulation.py: API error messages ──
+    'error_zep_not_configured': 'ZEP_API_KEY nicht konfiguriert',
+    'error_entity_not_found': 'Entitaet existiert nicht: {uuid}',
+    'error_project_not_found': 'Projekt existiert nicht: {project_id}',
+    'error_no_project_id': 'Bitte geben Sie eine project_id an',
+    'error_no_graph': 'Fuer das Projekt wurde noch kein Graph aufgebaut, bitte zuerst /api/graph/build aufrufen',
+
+    # ── oasis_profile_generator.py ──
+    'default_country': 'Deutschland',
+
+    # ── simulation_config_generator.py: Default reasoning — localized to the German daily routine, consistent with sim_config_time_system/sim_config_agent_system above (was an untranslated "chinesischem Tagesablauf" leftover from the Chinese source pack) ──
+    'sim_default_time_reasoning': 'Standard-Konfiguration basierend auf deutschem Tagesablauf (1 Stunde pro Runde)',
+    'sim_default_event_reasoning': 'Standard-Konfiguration verwendet',
+
+    # ── ontology_generator.py: Text truncation notice ──
+    'ontology_text_truncated': '...(Originaltext umfasst {original_length} Zeichen, die ersten {max_length} Zeichen wurden fuer die Ontologie-Analyse verwendet)...',
+}
+
+
+# ═══════════════════════════════════════════════════════════════
+# PATTERNS - Regex patterns that match German FORMATS
+# ═══════════════════════════════════════════════════════════════
+
+PATTERNS = {
+    # ── Matches FORMATS['insight_query'] (note: \uXXXX sequences in these raw strings are resolved by the re engine, not the string literal) ──
+    'insight_query': r'Analysefrage:\s*(.+?)(?:\n|$)',
+
+    # ── Matches FORMATS['insight_scenario'] ──
+    'insight_scenario': r'Vorhersageszenario:\s*(.+?)(?:\n|$)',
+
+    # ── Matches FORMATS['search_query'] ──
+    'search_query': r'Suchanfrage:\s*(.+?)(?:\n|$)',
+
+    # ── Matches FORMATS['search_results_found'] ──
+    'search_results_found': r'(\d+)\s*relevante Ergebnisse gefunden',
+
+    # ── Matches FORMATS['panorama_query'] ──
+    'panorama_query': r'Abfrage:\s*(.+?)(?:\n|$)',
+
+    # ── "Final Answer:" is a technical marker that stays in English,
+    #    but we also match the German translation ──
+    'final_answer': r'(?:Final Answer|Endg\u00fcltige Antwort)\s*:\s*',
+
+    # ── Matches the "Frage X:" numbering that PROMPTS['interview_prompt_prefix'] instructs agents to use (key_quotes cleanup); [:\uff1a] also accepts the full-width colon ──
+    'interview_question_prefix': r'Frage\s*\d+[:\uff1a]\s*',
+
+    # ── Matches platform response markers in interview output ──
+    'interview_twitter_marker': r'\u3010Twitter-Plattform-Antwort\u3011',
+    'interview_reddit_marker': r'\u3010Reddit-Plattform-Antwort\u3011',
+
+    # ── Tool call payload: captures a JSON object. NOTE(review): named "XML tag" but no <tool_call> tags appear in the pattern — they may have been stripped; verify against the consumer in report_agent.py ──
+    'tool_call_xml': r'\s*(\{.*?\})\s*',
+}
diff --git a/backend/app/i18n/en.py b/backend/app/i18n/en.py
new file mode 100644
index 000000000..dbda366a8
--- /dev/null
+++ b/backend/app/i18n/en.py
@@ -0,0 +1,1158 @@
+"""English language pack"""
+
+# ═══════════════════════════════════════════════════════════════
+# PROMPTS – full LLM prompt templates
+# ═══════════════════════════════════════════════════════════════
+
+PROMPTS = {
+
+ # ── Report planning ──────────────────────────────────────
+
+ 'report_plan_system': """\
+You are an expert author of "Future Prediction Reports" who possesses a "God's-eye view" of a simulated world — you can observe every Agent's behavior, statements, and interactions.
+
+[Core Concept]
+We have built a simulated world and injected a specific "simulation requirement" as a variable. The evolution of that simulated world constitutes a prediction of what might happen in the future. What you are observing is not "experimental data" but a "rehearsal of the future."
+
+[Your Task]
+Write a "Future Prediction Report" that answers:
+1. Under the conditions we set, what happened in the future?
+2. How did each category of Agent (population group) react and act?
+3. What noteworthy future trends and risks does this simulation reveal?
+
+[Report Positioning]
+- This is a simulation-based future prediction report revealing "if this happens, what will the future look like."
+- Focus on prediction outcomes: event trajectories, group reactions, emergent phenomena, potential risks.
+- Statements and actions of Agents in the simulated world are predictions of future human behavior.
+- This is NOT an analysis of the current real world.
+- This is NOT a generic public-opinion summary.
+
+[Section Count Limit]
+- Minimum 2 sections, maximum 5 sections.
+- No sub-sections needed; each section should contain complete content directly.
+- Content should be concise, focused on core predictive findings.
+- The section structure is up to you based on the prediction results.
+
+Please output a JSON report outline in the following format:
+{
+ "title": "Report title",
+ "summary": "Report summary (one sentence summarizing the core predictive findings)",
+ "sections": [
+ {
+ "title": "Section title",
+ "description": "Section content description"
+ }
+ ]
+}
+
+Note: the sections array must have at least 2 and at most 5 elements!""",
+
+ 'report_plan_user': """\
+[Prediction Scenario Setup]
+The variable injected into the simulated world (simulation requirement): {simulation_requirement}
+
+[Simulated World Scale]
+- Number of entities in the simulation: {total_nodes}
+- Number of relationships generated: {total_edges}
+- Entity type distribution: {entity_types}
+- Number of active Agents: {total_entities}
+
+[Sample Future Facts Predicted by Simulation]
+{related_facts_json}
+
+Please examine this future rehearsal from a "God's-eye view":
+1. Under the conditions we set, what state did the future take on?
+2. How did each category of people (Agents) react and act?
+3. What noteworthy future trends does this simulation reveal?
+
+Design the most appropriate report section structure based on the prediction results.
+
+[Reminder] Report section count: minimum 2, maximum 5. Content should be concise and focused on core predictive findings.""",
+
+ # ── Section generation ───────────────────────────────────
+
+ 'section_system': """\
+You are an expert author of "Future Prediction Reports," currently writing one section of the report.
+
+Report title: {report_title}
+Report summary: {report_summary}
+Prediction scenario (simulation requirement): {simulation_requirement}
+
+Current section to write: {section_title}
+
+===============================================================
+[Core Concept]
+===============================================================
+
+The simulated world is a rehearsal of the future. We injected specific conditions (simulation requirements) into it, and the Agents' behavior and interactions are predictions of future human behavior.
+
+Your task is to:
+- Reveal what happened in the future under the set conditions
+- Predict how each group of people (Agents) reacted and acted
+- Discover noteworthy future trends, risks, and opportunities
+
+Do NOT write an analysis of the current real world.
+DO focus on "what will the future look like" — simulation results ARE the predicted future.
+
+===============================================================
+[Most Important Rules — Must Follow]
+===============================================================
+
+1. [Must Call Tools to Observe the Simulated World]
+ - You are observing a rehearsal of the future from a "God's-eye view"
+ - All content must come from events and Agent behaviors in the simulated world
+ - Do NOT use your own knowledge to write report content
+ - Call tools at least 3 times (up to 5) per section to observe the simulated world that represents the future
+
+2. [Must Quote Agents' Original Statements and Actions]
+ - Agent statements and behaviors are predictions of future human behavior
+ - Use quotation format to present these predictions, for example:
+ > "A certain group would say: original content..."
+ - These quotations are the core evidence of the simulation predictions
+
+3. [Language Consistency — Quoted Content Must Be in Report Language]
+ - Tool results may contain content in other languages or mixed languages
+ - The report must be written entirely in English
+ - When quoting non-English content from tools, translate it into fluent English before including it in the report
+ - Preserve the original meaning while ensuring natural expression
+ - This rule applies to both body text and quotation blocks (> format)
+
+4. [Faithfully Present Prediction Results]
+ - Report content must reflect the simulation results representing the future
+ - Do NOT add information that does not exist in the simulation
+ - If information is insufficient in certain areas, state so honestly
+
+===============================================================
+[Format Specifications — Extremely Important!]
+===============================================================
+
+[One Section = Smallest Content Unit]
+- Each section is the smallest division of the report
+- Do NOT use any Markdown headings (#, ##, ###, #### etc.) within a section
+- Do NOT add the section title at the beginning of the content
+- The section title is added automatically by the system; you only write the body text
+- Use **bold**, paragraph breaks, quotations, and lists to organize content — but no headings
+
+[Correct Example]
+```
+This section analyzes the public opinion propagation dynamics of the event. Through deep analysis of simulation data, we found...
+
+**Initial Ignition Phase**
+
+As the primary platform for public opinion, the first wave of information...
+
+> "The platform contributed 68% of the initial voice volume..."
+
+**Emotion Amplification Phase**
+
+The secondary platform further amplified the event's impact:
+
+- Strong visual impact
+- High emotional resonance
+```
+
+[Incorrect Example]
+```
+## Executive Summary <- Wrong! No headings
+### 1. Initial Phase <- Wrong! No ### sub-headings
+#### 1.1 Detailed Analysis <- Wrong! No #### sub-sub-headings
+
+This section analyzes...
+```
+
+===============================================================
+[Available Retrieval Tools] (call 3-5 times per section)
+===============================================================
+
+{tools_description}
+
+[Tool Usage Tips — Mix different tools, do not use only one]
+- insight_forge: Deep insight analysis; automatically decomposes questions and retrieves facts and relationships from multiple dimensions
+- panorama_search: Wide-angle panoramic search; understand the full picture, timeline, and evolution of events
+- quick_search: Quick verification of a specific data point
+- interview_agents: Interview simulated Agents to get first-person perspectives and authentic reactions from different roles
+
+===============================================================
+[Workflow]
+===============================================================
+
+Each reply can only do ONE of the following two things (not both):
+
+Option A — Call a tool:
+Output your thinking, then call one tool in this format:
+<tool_call>
+{{"name": "tool_name", "parameters": {{"param": "value"}}}}
+</tool_call>
+The system will execute the tool and return the results. You must not write tool results yourself.
+
+Option B — Output final content:
+When you have gathered enough information through tools, begin with "Final Answer:" and output the section content.
+
+Strict prohibitions:
+- Do NOT include both a tool call and a Final Answer in the same reply
+- Do NOT fabricate tool results (Observations); all tool results are injected by the system
+- Call at most one tool per reply
+
+===============================================================
+[Section Content Requirements]
+===============================================================
+
+1. Content must be based on simulation data retrieved via tools
+2. Extensively quote original text to showcase simulation results
+3. Use Markdown formatting (but no headings):
+ - Use **bold text** to highlight key points (instead of sub-headings)
+ - Use lists (- or 1. 2. 3.) to organize key points
+ - Use blank lines to separate paragraphs
+ - Do NOT use #, ##, ###, #### or any heading syntax
+4. [Quotation Format — Must Be a Separate Paragraph]
+ Quotations must stand alone as their own paragraph with a blank line before and after:
+
+ Correct:
+ ```
+ The response was considered lacking in substance.
+
+ > "The response pattern appeared rigid and slow in the fast-moving social media environment."
+
+ This assessment reflects widespread public dissatisfaction.
+ ```
+
+ Incorrect:
+ ```
+ The response was considered lacking in substance. > "The response pattern..." This assessment reflects...
+ ```
+5. Maintain logical coherence with other sections
+6. [Avoid Repetition] Carefully read the completed sections below; do not repeat the same information
+7. [Reminder] Do NOT add any headings! Use **bold** instead of sub-section titles""",
+
+ 'section_user': """\
+Completed section content (please read carefully to avoid repetition):
+{previous_content}
+
+===============================================================
+[Current Task] Write section: {section_title}
+===============================================================
+
+[Important Reminders]
+1. Carefully read the completed sections above to avoid repeating the same content!
+2. You must call tools to retrieve simulation data before writing
+3. Mix different tools; do not use only one kind
+4. Report content must come from retrieval results; do not use your own knowledge
+
+[Format Warning — Must Follow]
+- Do NOT write any headings (#, ##, ###, #### are all prohibited)
+- Do NOT write "{section_title}" as the opening
+- The section title is added automatically by the system
+- Write body text directly; use **bold** instead of sub-section titles
+
+Begin:
+1. First think (Thought) about what information this section needs
+2. Then call a tool (Action) to retrieve simulation data
+3. After gathering enough information, output Final Answer (body text only, no headings)""",
+
+ # ── ReACT loop observation ───────────────────────────────
+
+ 'react_observation': """\
+Observation (retrieval results):
+
+=== Tool {tool_name} returned ===
+{result}
+
+===============================================================
+Tools called {tool_calls_count}/{max_tool_calls} times (used: {used_tools_str}){unused_hint}
+- If information is sufficient: begin with "Final Answer:" and output section content (must quote the above original text)
+- If more information is needed: call another tool to continue retrieval
+===============================================================""",
+
+ 'react_insufficient_tools': (
+ "[Notice] You have only called tools {tool_calls_count} time(s); at least {min_tool_calls} calls are required. "
+ "Please call more tools to retrieve additional simulation data before outputting Final Answer.{unused_hint}"
+ ),
+
+ 'react_insufficient_tools_alt': (
+ "Currently only {tool_calls_count} tool call(s) made; at least {min_tool_calls} required. "
+ "Please call a tool to retrieve simulation data.{unused_hint}"
+ ),
+
+ 'react_tool_limit': (
+ "Tool call limit reached ({tool_calls_count}/{max_tool_calls}); no more tool calls allowed. "
+ 'Please immediately output section content beginning with "Final Answer:" based on the information already gathered.'
+ ),
+
+ 'react_unused_tools_hint': "\nYou have not yet used: {unused_list}. Consider trying different tools for multi-angle information.",
+
+ 'react_force_final': 'Tool call limit reached. Please output Final Answer: and generate the section content now.',
+
+ # ── Chat prompt ──────────────────────────────────────────
+
+ 'chat_system': """\
+You are a concise and efficient simulation prediction assistant.
+
+[Background]
+Prediction conditions: {simulation_requirement}
+
+[Generated Analysis Report]
+{report_content}
+
+[Rules]
+1. Prioritize answering questions based on the report content above
+2. Answer questions directly; avoid lengthy reasoning
+3. Only call tools for more data when the report content is insufficient
+4. Answers should be concise, clear, and well-organized
+
+[Available Tools] (use only when needed, max 1-2 calls)
+{tools_description}
+
+[Tool Call Format]
+<tool_call>
+{{"name": "tool_name", "parameters": {{"param": "value"}}}}
+</tool_call>
+
+[Answer Style]
+- Concise and direct; no lengthy essays
+- Use > format to quote key content
+- Give the conclusion first, then explain the reasoning""",
+
+ 'chat_observation_suffix': "\n\nPlease answer the question concisely.",
+
+ # ── Tool descriptions ────────────────────────────────────
+
+ 'tool_desc_insight_forge': """\
+[Deep Insight Retrieval — Powerful Retrieval Tool]
+This is our most powerful retrieval function, designed for deep analysis. It will:
+1. Automatically decompose your question into multiple sub-questions
+2. Retrieve information from the simulation graph across multiple dimensions
+3. Integrate results from semantic search, entity analysis, and relationship chain tracking
+4. Return the most comprehensive and in-depth retrieval content
+
+[Use Cases]
+- Need to deeply analyze a topic
+- Need to understand multiple aspects of an event
+- Need rich material to support a report section
+
+[Returns]
+- Relevant original facts (can be quoted directly)
+- Core entity insights
+- Relationship chain analysis""",
+
+ 'tool_desc_panorama_search': """\
+[Panoramic Search — Full-Picture View]
+This tool is for getting a complete overview of simulation results, especially suitable for understanding event evolution. It will:
+1. Retrieve all relevant nodes and relationships
+2. Distinguish between currently valid facts and historical/expired facts
+3. Help you understand how public opinion evolved
+
+[Use Cases]
+- Need to understand the complete development trajectory of an event
+- Need to compare public opinion changes across different phases
+- Need comprehensive entity and relationship information
+
+[Returns]
+- Currently valid facts (latest simulation results)
+- Historical/expired facts (evolution records)
+- All involved entities""",
+
+ 'tool_desc_quick_search': """\
+[Quick Search — Fast Retrieval]
+A lightweight, fast retrieval tool suitable for simple, direct information queries.
+
+[Use Cases]
+- Need to quickly find a specific piece of information
+- Need to verify a fact
+- Simple information retrieval
+
+[Returns]
+- List of facts most relevant to the query""",
+
+ 'tool_desc_interview_agents': """\
+[In-Depth Interview — Real Agent Interviews (Dual Platform)]
+Calls the OASIS simulation environment's interview API to conduct real interviews with running simulation Agents!
+This is not LLM simulation — it calls the real interview interface to get original responses from simulation Agents.
+By default, interviews are conducted simultaneously on both the Twitter and Reddit platforms for more comprehensive perspectives.
+
+Workflow:
+1. Automatically reads profile files to understand all simulation Agents
+2. Intelligently selects Agents most relevant to the interview topic (e.g., students, media, officials, etc.)
+3. Automatically generates interview questions
+4. Calls /api/simulation/interview/batch for real dual-platform interviews
+5. Integrates all interview results for multi-perspective analysis
+
+[Use Cases]
+- Need to understand event perspectives from different roles (What do students think? Media? Officials?)
+- Need to collect opinions and positions from multiple parties
+- Need authentic responses from simulation Agents (from the OASIS simulation environment)
+- Want to make the report more vivid with "interview transcripts"
+
+[Returns]
+- Identity information of interviewed Agents
+- Interview responses from each Agent on both Twitter and Reddit platforms
+- Key quotes (can be cited directly)
+- Interview summary and viewpoint comparison
+
+[Important] The OASIS simulation environment must be running to use this feature!""",
+
+ # ── Ontology generator ───────────────────────────────────
+
+ 'ontology_system': """\
+You are a professional knowledge graph ontology design expert. Your task is to analyze given text content and simulation requirements, and design entity types and relationship types suitable for **social media public opinion simulation**.
+
+**Important: You must output valid JSON format data; do not output anything else.**
+
+## Core Task Background
+
+We are building a **social media public opinion simulation system**. In this system:
+- Each entity is an "account" or "subject" that can speak, interact, and spread information on social media
+- Entities will influence, repost, comment on, and respond to each other
+- We need to simulate the reactions and information propagation paths of various parties in public opinion events
+
+Therefore, **entities must be real-world subjects that can speak and interact on social media**:
+
+**Acceptable**:
+- Specific individuals (public figures, parties involved, opinion leaders, scholars, ordinary people)
+- Companies and enterprises (including their official accounts)
+- Organizations (universities, associations, NGOs, unions, etc.)
+- Government departments, regulatory agencies
+- Media organizations (newspapers, TV stations, self-media, websites)
+- Social media platforms themselves
+- Representatives of specific groups (alumni associations, fan groups, advocacy groups, etc.)
+
+**Not acceptable**:
+- Abstract concepts (e.g., "public opinion," "emotion," "trend")
+- Topics/themes (e.g., "academic integrity," "education reform")
+- Viewpoints/attitudes (e.g., "supporters," "opponents")
+
+## Output Format
+
+Please output JSON with the following structure:
+
+```json
+{
+ "entity_types": [
+ {
+ "name": "Entity type name (English, PascalCase)",
+ "description": "Brief description (English, max 100 characters)",
+ "attributes": [
+ {
+ "name": "attribute_name (English, snake_case)",
+ "type": "text",
+ "description": "Attribute description"
+ }
+ ],
+ "examples": ["Example entity 1", "Example entity 2"]
+ }
+ ],
+ "edge_types": [
+ {
+ "name": "Relationship type name (English, UPPER_SNAKE_CASE)",
+ "description": "Brief description (English, max 100 characters)",
+ "source_targets": [
+ {"source": "Source entity type", "target": "Target entity type"}
+ ],
+ "attributes": []
+ }
+ ],
+ "analysis_summary": "Brief analysis of text content"
+}
+```
+
+## Design Guidelines (Extremely Important!)
+
+### 1. Entity Type Design — Must Strictly Follow
+
+**Quantity requirement: Exactly 10 entity types**
+
+**Hierarchy requirements (must include both specific types and fallback types)**:
+
+Your 10 entity types must include:
+
+A. **Fallback types (required, placed last 2 in the list)**:
+ - `Person`: Fallback type for any individual. When a person does not belong to a more specific type, classify as Person.
+ - `Organization`: Fallback type for any organization. When an organization does not belong to a more specific type, classify as Organization.
+
+B. **Specific types (8, designed based on text content)**:
+ - Design more specific types for major roles appearing in the text
+ - Example: if the text involves an academic event, you might have `Student`, `Professor`, `University`
+ - Example: if the text involves a business event, you might have `Company`, `CEO`, `Employee`
+
+**Why fallback types are needed**:
+- Various people appear in text, such as "elementary school teachers," "bystanders," "netizens"
+- If no specific type matches, they should be classified as `Person`
+- Similarly, small organizations, temporary groups, etc. should be classified as `Organization`
+
+**Design principles for specific types**:
+- Identify frequently appearing or key role types from the text
+- Each specific type should have clear boundaries; avoid overlap
+- The description must clearly explain how this type differs from the fallback type
+
+### 2. Relationship Type Design
+
+- Quantity: 6-10
+- Relationships should reflect real connections in social media interactions
+- Ensure the source_targets cover the entity types you defined
+
+### 3. Attribute Design
+
+- 1-3 key attributes per entity type
+- **Note**: Attribute names cannot use `name`, `uuid`, `group_id`, `created_at`, `summary` (these are system reserved)
+- Recommended: `full_name`, `title`, `role`, `position`, `location`, `description`, etc.
+
+## Entity Type Reference
+
+**Individual (Specific)**:
+- Student
+- Professor
+- Journalist
+- Celebrity
+- Executive
+- Official: Government official
+- Lawyer
+- Doctor
+
+**Individual (Fallback)**:
+- Person: Any individual (used when not matching other specific types)
+
+**Organization (Specific)**:
+- University
+- Company
+- GovernmentAgency
+- MediaOutlet
+- Hospital
+- School: K-12 school
+- NGO
+
+**Organization (Fallback)**:
+- Organization: Any organization (used when not matching other specific types)
+
+## Relationship Type Reference
+
+- WORKS_FOR
+- STUDIES_AT
+- AFFILIATED_WITH
+- REPRESENTS
+- REGULATES
+- REPORTS_ON
+- COMMENTS_ON
+- RESPONDS_TO
+- SUPPORTS
+- OPPOSES
+- COLLABORATES_WITH
+- COMPETES_WITH""",
+
+ 'ontology_user': """\
+## Simulation Requirement
+
+{simulation_requirement}
+
+## Document Content
+
+{combined_text}
+""",
+
+ 'ontology_user_additional': """\
+
+## Additional Notes
+
+{additional_context}
+""",
+
+ 'ontology_user_instructions': """\
+
+Based on the above content, design entity types and relationship types suitable for social public opinion simulation.
+
+**Rules that must be followed**:
+1. Output exactly 10 entity types
+2. The last 2 must be fallback types: Person (individual fallback) and Organization (organization fallback)
+3. The first 8 are specific types designed based on text content
+4. All entity types must be real-world subjects that can speak publicly; no abstract concepts
+5. Attribute names cannot use name, uuid, group_id, etc. — use full_name, org_name, etc. instead
+""",
+
+ 'ontology_text_truncated': "\n\n...(Original text: {original_length} characters total; first {max_length} characters used for ontology analysis)...",
+
+ # ── Simulation config generator ──────────────────────────
+
+ 'config_time_system': "You are a social media simulation expert. Return pure JSON format; time configuration should be adapted to Western daily routines.",
+
+ 'config_time_prompt': """\
+Based on the following simulation requirements, generate a time simulation configuration.
+
+{context}
+
+## Task
+Please generate a time configuration JSON.
+
+### Basic Principles (for reference only; adjust flexibly based on the specific event and participant groups):
+- The user base follows Western daily routines
+- 0-5 AM: almost no activity (activity coefficient 0.05)
+- 6-8 AM: gradually waking up (activity coefficient 0.4)
+- 9 AM-6 PM: moderate activity during work hours (activity coefficient 0.7)
+- 7-10 PM: peak hours (activity coefficient 1.5)
+- 11 PM: declining activity (activity coefficient 0.5)
+- General pattern: low activity at night, gradual increase in the morning, moderate during work, evening peak
+- **Important**: The example values below are for reference only; you need to adjust based on event nature and participant group characteristics
+ - Example: Student groups may peak at 9-11 PM; media is active all day; government agencies only during work hours
+ - Example: Breaking news may cause late-night discussions; off_peak_hours can be shortened accordingly
+
+### Return JSON format (no markdown)
+
+Example:
+{{
+ "total_simulation_hours": 72,
+ "minutes_per_round": 60,
+ "agents_per_hour_min": 5,
+ "agents_per_hour_max": 50,
+ "peak_hours": [19, 20, 21, 22],
+ "off_peak_hours": [0, 1, 2, 3, 4, 5],
+ "morning_hours": [6, 7, 8],
+ "work_hours": [9, 10, 11, 12, 13, 14, 15, 16, 17, 18],
+ "reasoning": "Time configuration explanation for this event"
+}}
+
+Field descriptions:
+- total_simulation_hours (int): Total simulation duration, 24-168 hours; shorter for breaking events, longer for ongoing topics
+- minutes_per_round (int): Duration per round, 30-120 minutes; 60 recommended
+- agents_per_hour_min (int): Minimum Agents activated per hour (range: 1-{max_agents_allowed})
+- agents_per_hour_max (int): Maximum Agents activated per hour (range: 1-{max_agents_allowed})
+- peak_hours (int array): Peak hours; adjust based on event participant groups
+- off_peak_hours (int array): Low-activity hours; usually late night/early morning
+- morning_hours (int array): Morning hours
+- work_hours (int array): Work hours
+- reasoning (string): Brief explanation of why this configuration was chosen""",
+
+ 'config_time_default_reasoning': "Using default Western daily routine configuration (1 hour per round)",
+
+ 'config_event_prompt': """\
+Based on the following simulation requirements, generate an event configuration.
+
+Simulation requirement: {simulation_requirement}
+
+{context}
+
+## Available Entity Types and Examples
+{type_info}
+
+## Task
+Please generate an event configuration JSON:
+- Extract hot topic keywords
+- Describe the direction of public opinion development
+- Design initial post content; **each post must specify a poster_type (publisher type)**
+
+**Important**: poster_type must be selected from the "Available Entity Types" above so that initial posts can be assigned to appropriate Agents for publishing.
+For example: official statements should be published by Official/University types, news by MediaOutlet, student opinions by Student.
+
+Return JSON format (no markdown):
+{{
+ "hot_topics": ["keyword1", "keyword2", ...],
+ "narrative_direction": "",
+ "initial_posts": [
+ {{"content": "Post content", "poster_type": "Entity type (must be from available types)"}},
+ ...
+ ],
+ "reasoning": ""
+}}""",
+
+ 'config_event_system': "You are a public opinion analysis expert. Return pure JSON format. Note that poster_type must exactly match an available entity type.",
+
+ 'config_agent_prompt': """\
+Based on the following information, generate social media activity configurations for each entity.
+
+Simulation requirement: {simulation_requirement}
+
+## Entity List
+```json
+{entity_list_json}
+```
+
+## Task
+Generate activity configurations for each entity. Note:
+- **Schedule adapted to Western daily routines**: 0-5 AM almost no activity, 7-10 PM most active
+- **Official institutions** (University/GovernmentAgency): Low activity (0.1-0.3), work hours (9-17), slow response (60-240 min), high influence (2.5-3.0)
+- **Media** (MediaOutlet): Medium activity (0.4-0.6), all-day active (8-23), fast response (5-30 min), high influence (2.0-2.5)
+- **Individuals** (Student/Person/Alumni): High activity (0.6-0.9), mainly evening (18-23), fast response (1-15 min), low influence (0.8-1.2)
+- **Public figures/Experts**: Medium activity (0.4-0.6), medium-high influence (1.5-2.0)
+
+Return JSON format (no markdown):
+{{
+ "agent_configs": [
+ {{
+ "agent_id": ,
+ "activity_level": <0.0-1.0>,
+ "posts_per_hour": ,
+ "comments_per_hour": ,
+ "active_hours": [],
+ "response_delay_min": ,
+ "response_delay_max": ,
+ "sentiment_bias": <-1.0 to 1.0>,
+ "stance": "",
+ "influence_weight":
+ }},
+ ...
+ ]
+}}""",
+
+ 'config_agent_system': "You are a social media behavior analysis expert. Return pure JSON; configurations should be adapted to Western daily routines.",
+
+ # ── Profile generation ───────────────────────────────────
+
+ 'profile_system': "You are a social media user profile generation expert. Generate detailed, realistic personas for public opinion simulation, restoring real-world situations as closely as possible. You must return valid JSON format; all string values must not contain unescaped newline characters. Use English.",
+
+ 'profile_individual_prompt': """\
+Generate a detailed social media user persona for this entity, restoring real-world situations as closely as possible.
+
+Entity name: {entity_name}
+Entity type: {entity_type}
+Entity summary: {entity_summary}
+Entity attributes: {attrs_str}
+
+Context information:
+{context_str}
+
+Please generate JSON with the following fields:
+
+1. bio: Social media biography, 200 words
+2. persona: Detailed persona description (2000 words of plain text), including:
+ - Basic information (age, profession, education background, location)
+ - Background (important experiences, connection to the event, social relationships)
+ - Personality traits (MBTI type, core personality, emotional expression style)
+ - Social media behavior (posting frequency, content preferences, interaction style, language characteristics)
+ - Positions and opinions (attitude toward topics, content that may anger/move them)
+ - Unique characteristics (catchphrases, special experiences, personal hobbies)
+ - Personal memory (important part of the persona; introduce this individual's connection to the event and their existing actions and reactions in the event)
+3. age: Age as a number (must be an integer)
+4. gender: Gender, must be in English: "male" or "female"
+5. mbti: MBTI type (e.g., INTJ, ENFP, etc.)
+6. country: Country (use English, e.g., "United States")
+7. profession: Profession
+8. interested_topics: Array of topics of interest
+
+Important:
+- All field values must be strings or numbers; do not use newline characters
+- persona must be a single coherent text description
+- Use English (gender field must use English male/female)
+- Content must be consistent with entity information
+- age must be a valid integer; gender must be "male" or "female"
+""",
+
+ 'profile_group_prompt': """\
+Generate a detailed social media account profile for this organization/group entity, restoring real-world situations as closely as possible.
+
+Entity name: {entity_name}
+Entity type: {entity_type}
+Entity summary: {entity_summary}
+Entity attributes: {attrs_str}
+
+Context information:
+{context_str}
+
+Please generate JSON with the following fields:
+
+1. bio: Official account biography, 200 words, professional and appropriate
+2. persona: Detailed account profile description (2000 words of plain text), including:
+ - Organization basics (official name, nature, founding background, main functions)
+ - Account positioning (account type, target audience, core functions)
+ - Communication style (language characteristics, common expressions, taboo topics)
+ - Content characteristics (content types, posting frequency, active time periods)
+ - Positions and attitudes (official stance on core topics, approach to controversies)
+ - Special notes (represented group profile, operational habits)
+ - Organizational memory (important part of the profile; introduce this organization's connection to the event and its existing actions and reactions in the event)
+3. age: Fixed at 30 (virtual age for organizational accounts)
+4. gender: Fixed as "other" (organizational accounts use "other" for non-individual)
+5. mbti: MBTI type to describe account style, e.g., ISTJ for rigorous and conservative
+6. country: Country (use English, e.g., "United States")
+7. profession: Organizational function description
+8. interested_topics: Array of areas of interest
+
+Important:
+- All field values must be strings or numbers; null values are not allowed
+- persona must be a single coherent text description; do not use newline characters
+- Use English (gender field must be "other")
+- age must be the integer 30; gender must be the string "other"
+- Organizational account statements must be consistent with their identity and positioning""",
+
+ # ── Interview ─────────────────────────────────────────────
+
+ 'interview_prompt_prefix': (
+ "You are being interviewed. Based on your persona, all past memories, and actions, "
+ "respond directly in plain text to the following questions.\n"
+ "Response requirements:\n"
+ "1. Answer directly in natural language; do not call any tools\n"
+ "2. Do not return JSON format or tool call format\n"
+ "3. Do not use Markdown headings (such as #, ##, ###)\n"
+ "4. Answer each question in order, starting each answer with 'Question X:' (where X is the question number)\n"
+ "5. Separate answers to different questions with a blank line\n"
+ "6. Provide substantive answers; each question should have at least 2-3 sentences\n\n"
+ ),
+
+ 'interview_select_system': """\
+You are a professional interview planning expert. Your task is to select the most suitable interview subjects from the simulation Agent list based on the interview requirements.
+
+Selection criteria:
+1. The Agent's identity/profession is related to the interview topic
+2. The Agent may hold unique or valuable viewpoints
+3. Select diverse perspectives (e.g., supporters, opponents, neutral parties, professionals, etc.)
+4. Prioritize roles directly related to the event
+
+Return JSON format:
+{
+ "selected_indices": [list of selected Agent indices],
+ "reasoning": "Explanation of selection rationale"
+}""",
+
+ 'interview_select_user': """\
+Interview requirement:
+{interview_requirement}
+
+Simulation background:
+{simulation_requirement}
+
+Available Agents (total {agent_count}):
+{agent_summaries_json}
+
+Please select up to {max_agents} most suitable Agents for interviewing and explain the reasoning.""",
+
+ 'interview_questions_system': """\
+You are a professional journalist/interviewer. Generate 3-5 in-depth interview questions based on the interview requirements.
+
+Question requirements:
+1. Open-ended questions that encourage detailed answers
+2. Different roles may give different answers
+3. Cover multiple dimensions: facts, opinions, feelings, etc.
+4. Natural language, like a real interview
+5. Keep each question under 50 words; concise and clear
+6. Ask directly; do not include background explanations or prefixes
+
+Return JSON format: {"questions": ["Question 1", "Question 2", ...]}""",
+
+ 'interview_questions_user': """\
+Interview requirement: {interview_requirement}
+
+Simulation background: {simulation_requirement}
+
+Interviewee roles: {agent_roles}
+
+Please generate 3-5 interview questions.""",
+
+ 'interview_summary_system': """\
+You are a professional news editor. Based on multiple interviewees' responses, generate an interview summary.
+
+Summary requirements:
+1. Distill the main viewpoints of each party
+2. Identify consensus and disagreements
+3. Highlight valuable quotes
+4. Remain objective and neutral; do not favor any side
+5. Keep within 1000 words
+
+Format constraints (must follow):
+- Use plain text paragraphs separated by blank lines
+- Do not use Markdown headings (such as #, ##, ###)
+- Do not use dividers (such as ---, ***)
+- When quoting interviewees, use quotation marks
+- You may use **bold** to highlight keywords, but do not use other Markdown syntax""",
+
+ 'interview_summary_user': """\
+Interview topic: {interview_requirement}
+
+Interview content:
+{interview_texts}
+
+Please generate an interview summary.""",
+
+ 'interview_default_question': "Regarding {topic}, what is your view?",
+
+ 'interview_fallback_questions': [
+ "Regarding {topic}, what is your viewpoint?",
+ "How does this matter affect you or the group you represent?",
+ "How do you think this issue should be resolved or improved?"
+ ],
+
+ # ── Sub-query generation ─────────────────────────────────
+
+ 'sub_query_system': """\
+You are a professional question analysis expert. Your task is to decompose a complex question into multiple sub-questions that can be independently observed in a simulated world.
+
+Requirements:
+1. Each sub-question should be specific enough to find related Agent behaviors or events in the simulated world
+2. Sub-questions should cover different dimensions of the original question (who, what, why, how, when, where)
+3. Sub-questions should be relevant to the simulation scenario
+4. Return JSON format: {"sub_queries": ["Sub-question 1", "Sub-question 2", ...]}""",
+
+ 'sub_query_user': """\
+Simulation requirement background:
+{simulation_requirement}
+
+{report_context_line}
+
+Please decompose the following question into {max_queries} sub-questions:
+{query}
+
+Return a JSON list of sub-questions.""",
+
+ 'sub_query_fallback_participants': "{query} — main participants",
+ 'sub_query_fallback_causes': "{query} — causes and impacts",
+ 'sub_query_fallback_process': "{query} — development process",
+
+ # ── Conflict/error handling prompts ──────────────────────
+
+ 'react_conflict_error': (
+ "[Format Error] You included both a tool call and a Final Answer in the same reply, which is not allowed.\n"
+ "Each reply can only do one of the following:\n"
+        "- Call one tool (output a <tool_call> block; do NOT write Final Answer)\n"
+        "- Output final content (begin with 'Final Answer:'; do NOT include <tool_call>)\n"
+ "Please reply again, doing only one of these."
+ ),
+
+ 'llm_empty_response_continue': "Please continue generating content.",
+ 'section_generation_failed': "(This section failed to generate: LLM returned an empty response. Please try again later.)",
+}
+
+
+# ═══════════════════════════════════════════════════════════════
+# FORMATS – parameterised display / output strings
+# ═══════════════════════════════════════════════════════════════
+
+FORMATS = {
+ # ── Search / retrieval ───────────────────────────────────
+ 'search_query': 'Search query: {query}',
+ 'search_results_found': 'Found {count} relevant results',
+ 'search_facts_header': '\n### Relevant Facts:',
+ 'search_edges_header': '\n### Relevant Edges:',
+ 'search_nodes_header': '\n### Relevant Nodes:',
+ 'entity_unknown_type': 'Unknown type',
+ 'entity_format': 'Entity: {name} (Type: {type})',
+ 'entity_summary': 'Summary: {summary}',
+ 'edge_format': 'Relationship: {source} --[{name}]--> {target}',
+ 'edge_fact': 'Fact: {fact}',
+ 'edge_unknown': 'Unknown',
+ 'edge_until_now': 'Present',
+ 'edge_validity': 'Validity: {valid_at} - {invalid_at}',
+ 'edge_expired': 'Expired: {expired_at}',
+
+ # ── InsightForge / Panorama ──────────────────────────────
+ 'insight_header': '## Deep Analysis of Future Predictions',
+ 'insight_query': 'Analysis question: {query}',
+ 'insight_scenario': 'Prediction scenario: {requirement}',
+ 'insight_stats_header': '\n### Prediction Data Statistics',
+ 'insight_stats_facts': '- Related prediction facts: {count}',
+ 'insight_stats_entities': '- Entities involved: {count}',
+ 'insight_stats_relations': '- Relationship chains: {count}',
+ 'insight_sub_queries_header': '\n### Sub-questions Analyzed',
+ 'insight_key_facts_header': '\n### [Key Facts] (please cite these in the report)',
+ 'insight_core_entities_header': '\n### [Core Entities]',
+ 'insight_entity_line': '- **{name}** ({type})',
+ 'insight_entity_summary_line': ' Summary: "{summary}"',
+ 'insight_entity_facts_line': ' Related facts: {count}',
+ 'insight_chains_header': '\n### [Relationship Chains]',
+
+ 'panorama_header': '## Panoramic Search Results (Future Full View)',
+ 'panorama_query': 'Query: {query}',
+ 'panorama_stats_header': '\n### Statistics',
+ 'panorama_stats_nodes': '- Total nodes: {count}',
+ 'panorama_stats_edges': '- Total edges: {count}',
+ 'panorama_stats_active': '- Currently valid facts: {count}',
+ 'panorama_stats_historical': '- Historical/expired facts: {count}',
+ 'panorama_active_header': '\n### [Currently Valid Facts] (simulation result originals)',
+ 'panorama_historical_header': '\n### [Historical/Expired Facts] (evolution records)',
+ 'panorama_entities_header': '\n### [Entities Involved]',
+
+ # ── Interview ────────────────────────────────────────────
+ 'interview_report_header': '## In-Depth Interview Report',
+ 'interview_topic_line': '**Interview Topic:** {topic}',
+ 'interview_count_line': '**Interviewees:** {interviewed} / {total} simulation Agents',
+ 'interview_selection_header': '\n### Interview Subject Selection Rationale',
+ 'interview_selection_auto': '(Automatic selection)',
+ 'interview_transcript_header': '\n### Interview Transcript',
+ 'interview_entry': '\n#### Interview #{index}: {agent_name}',
+ 'interview_no_records': '(No interview records)\n\n---',
+ 'interview_summary_header': '\n### Interview Summary and Core Viewpoints',
+ 'interview_no_summary': '(No summary)',
+ 'interview_bio_line': '_Bio: {bio}_',
+ 'interview_key_quotes_header': '\n**Key Quotes:**\n',
+
+ # ── Agent activity descriptions (Zep graph updater) ──────
+ 'activity_create_post_with_content': 'published a post: "{content}"',
+ 'activity_create_post': 'published a post',
+ 'activity_like_post_full': "liked {author}'s post: \"{content}\"",
+ 'activity_like_post_content': 'liked a post: "{content}"',
+ 'activity_like_post_author': "liked a post by {author}",
+ 'activity_like_post': 'liked a post',
+ 'activity_dislike_post_full': "disliked {author}'s post: \"{content}\"",
+ 'activity_dislike_post_content': 'disliked a post: "{content}"',
+ 'activity_dislike_post_author': "disliked a post by {author}",
+ 'activity_dislike_post': 'disliked a post',
+ 'activity_repost_full': "reposted {author}'s post: \"{content}\"",
+ 'activity_repost_content': 'reposted a post: "{content}"',
+ 'activity_repost_author': "reposted a post by {author}",
+ 'activity_repost': 'reposted a post',
+ 'activity_quote_post_full': 'quoted {author}\'s post "{content}"',
+ 'activity_quote_post_content': 'quoted a post "{content}"',
+ 'activity_quote_post_author': "quoted a post by {author}",
+ 'activity_quote_post': 'quoted a post',
+ 'activity_quote_comment': ', and commented: "{comment}"',
+ 'activity_follow_user': 'followed user "{target}"',
+ 'activity_follow': 'followed a user',
+ 'activity_comment_full': 'commented on {author}\'s post "{post}": "{comment}"',
+ 'activity_comment_on_post': 'commented on post "{post}": "{comment}"',
+ 'activity_comment_on_author': "commented on {author}'s post: \"{comment}\"",
+ 'activity_comment_content': 'commented: "{comment}"',
+ 'activity_comment': 'posted a comment',
+ 'activity_like_comment_full': "liked {author}'s comment: \"{content}\"",
+ 'activity_like_comment_content': 'liked a comment: "{content}"',
+ 'activity_like_comment_author': "liked a comment by {author}",
+ 'activity_like_comment': 'liked a comment',
+ 'activity_dislike_comment_full': "disliked {author}'s comment: \"{content}\"",
+ 'activity_dislike_comment_content': 'disliked a comment: "{content}"',
+ 'activity_dislike_comment_author': "disliked a comment by {author}",
+ 'activity_dislike_comment': 'disliked a comment',
+ 'activity_search': 'searched for "{query}"',
+ 'activity_search_generic': 'performed a search',
+ 'activity_search_user': 'searched for user "{query}"',
+ 'activity_search_user_generic': 'searched for users',
+ 'activity_mute_user': 'muted user "{target}"',
+ 'activity_mute': 'muted a user',
+ 'activity_generic': 'performed a {action_type} action',
+
+ # ── Platform names ───────────────────────────────────────
+ 'platform_twitter': 'World 1',
+ 'platform_reddit': 'World 2',
+ 'platform_twitter_answer': '[Twitter Platform Response]',
+ 'platform_reddit_answer': '[Reddit Platform Response]',
+
+ # ── Entity summary display ───────────────────────────────
+ 'entity_type_header': '\n### {entity_type} ({count})',
+ 'entity_type_more': ' ... and {remaining} more',
+
+ # ── Simulation config display ────────────────────────────
+ 'simulation_requirement_header': '## Simulation Requirement\n{requirement}',
+ 'entity_info_header': '\n## Entity Information ({count})\n{summary}',
+ 'document_truncated': '\n...(document truncated)',
+ 'original_document_header': '\n## Original Document Content\n{text}',
+
+ # ── Zep search context ───────────────────────────────────
+ 'zep_comprehensive_query': 'All information, activities, events, relationships, and background about {entity_name}',
+ 'zep_related_entity': 'Related entity: {name}',
+ 'zep_facts_header': 'Factual information:\n',
+ 'zep_related_entities_header': 'Related entities:\n',
+}
+
+
+# ═══════════════════════════════════════════════════════════════
+# STRINGS – fixed UI / status / label strings
+# ═══════════════════════════════════════════════════════════════
+
+STRINGS = {
+ # ── Report defaults ──────────────────────────────────────
+ 'report_default_title': 'Future Prediction Report',
+ 'report_default_summary': 'Analysis of future trends and risks based on simulation predictions',
+ 'report_default_section_1': 'Prediction Scenarios and Core Findings',
+ 'report_default_section_2': 'Population Behavior Prediction Analysis',
+ 'report_default_section_3': 'Trend Outlook and Risk Alerts',
+ 'report_first_section': '(This is the first section)',
+
+ # ── Report log messages ──────────────────────────────────
+ 'log_report_start': 'Report generation task started',
+ 'log_planning_start': 'Starting report outline planning',
+ 'log_planning_context': 'Retrieving simulation context information',
+ 'log_planning_complete': 'Outline planning complete',
+ 'log_section_start': 'Starting section generation: {title}',
+ 'log_react_thought': 'ReACT round {iteration} thinking',
+ 'log_tool_call': 'Calling tool: {tool_name}',
+ 'log_tool_result': 'Tool {tool_name} returned result',
+ 'log_llm_response': 'LLM response (tool call: {has_tool_calls}, final answer: {has_final_answer})',
+ 'log_section_content': 'Section {title} content generation complete',
+ 'log_section_complete': 'Section {title} generation complete',
+ 'log_report_complete': 'Report generation complete',
+ 'log_error': 'Error occurred: {error}',
+
+ # ── Progress messages ────────────────────────────────────
+ 'progress_init': 'Initializing report...',
+ 'progress_analyzing': 'Analyzing simulation requirements...',
+ 'progress_generating_outline': 'Generating report outline...',
+ 'progress_parsing_outline': 'Parsing outline structure...',
+ 'progress_outline_complete': 'Outline planning complete',
+ 'progress_deep_retrieval': 'Deep retrieval and writing ({current}/{max})',
+ 'progress_preparing_env': 'Starting simulation environment preparation...',
+ 'progress_stage_reading': 'Reading graph entities',
+ 'progress_stage_profiles': 'Generating Agent personas',
+ 'progress_stage_config': 'Generating simulation configuration',
+ 'progress_stage_scripts': 'Preparing simulation scripts',
+
+ # ── Interview ────────────────────────────────────────────
+ 'interview_no_profiles': 'No interviewable Agent profile files found',
+ 'interview_api_failed': 'Interview API call failed: {error}. Please check the OASIS simulation environment status.',
+ 'interview_env_not_running': 'Interview failed: {error}. The simulation environment may be shut down; please ensure the OASIS environment is running.',
+ 'interview_error': 'An error occurred during the interview: {error}',
+ 'interview_no_completed': 'No interviews completed',
+ 'interview_summary_fallback': 'Interviewed {count} respondents, including: {names}',
+ 'interview_default_selection': 'Using default selection strategy',
+ 'interview_auto_selection': 'Automatically selected based on relevance',
+ 'interview_platform_no_response': '(No response from this platform)',
+ 'interview_no_reply': '[No reply]',
+
+ # ── Profile ──────────────────────────────────────────────
+ 'profile_default_country': 'United States',
+ 'profile_unknown': 'Unknown',
+
+ # ── Simulation status ────────────────────────────────────
+ 'sim_dir_not_exist': 'Simulation directory does not exist',
+ 'sim_missing_files': 'Missing required files',
+ 'sim_already_prepared': 'Preparation already complete; no need to regenerate',
+ 'sim_provide_project_id': 'Please provide project_id',
+ 'sim_provide_simulation_id': 'Please provide simulation_id',
+ 'sim_project_not_found': 'Project not found: {id}',
+ 'sim_not_found': 'Simulation not found: {id}',
+ 'sim_no_graph': 'The project has not built a graph yet; please call /api/graph/build first',
+ 'sim_no_requirement': 'Project missing simulation requirement description (simulation_requirement)',
+ 'sim_entity_not_found': 'Entity not found: {uuid}',
+ 'sim_zep_not_configured': 'ZEP_API_KEY not configured',
+ 'sim_llm_not_configured': 'LLM_API_KEY not configured',
+
+ # ── Tool errors ──────────────────────────────────────────
+ 'tool_unknown': 'Unknown tool: {name}. Please use one of: insight_forge, panorama_search, quick_search',
+ 'tool_execution_failed': 'Tool execution failed: {error}',
+
+ # ── Ontology ─────────────────────────────────────────────
+ 'ontology_auto_generated_header': '"""\nCustom Entity Type Definitions\nAuto-generated by MiroFish for social public opinion simulation\n"""',
+ 'ontology_entity_section': '# ============== Entity Type Definitions ==============',
+ 'ontology_edge_section': '# ============== Relationship Type Definitions ==============',
+ 'ontology_config_section': '# ============== Type Configuration ==============',
+}
+
+
+# ═══════════════════════════════════════════════════════════════
+# PATTERNS – regex patterns that match text produced by FORMATS
+# ═══════════════════════════════════════════════════════════════
+
+PATTERNS = {
+ # ── InsightForge / Panorama ──────────────────────────────
+ 'insight_query': r'Analysis question:\s*(.+?)(?:\n|$)',
+ 'insight_scenario': r'Prediction scenario:\s*(.+?)(?:\n|$)',
+
+ # ── ReACT / Final Answer ─────────────────────────────────
+ 'final_answer': r'(?:Final Answer)[::]\s*\n*([\s\S]*)$',
+    'tool_call_xml': r'<tool_call>\s*(\{.*?\})\s*</tool_call>',
+ 'tool_call_bare_json': r'(\{"(?:name|tool)"\s*:.*?\})\s*$',
+
+ # ── Interview question splitting ─────────────────────────
+ 'question_split': r'(?:^|[\r\n]+)Question\s*(\d+)[::]\s*',
+ 'question_strip': r'^Question\s*\d+[::]\s*',
+    'question_number_in_text': r'Question\s*\d+',
+
+ # ── Platform labels ──────────────────────────────────────
+ 'platform_no_response': '(No response from this platform)',
+ 'no_reply': '[No reply]',
+ 'platform_twitter_label': r'\[Twitter Platform Response\]',
+ 'platform_reddit_label': r'\[Reddit Platform Response\]',
+
+ # ── Search / entity ──────────────────────────────────────
+ 'search_query_extract': r'Search query:\s*(.+?)(?:\n|$)',
+
+ # ── Edge validity ────────────────────────────────────────
+ 'edge_validity_extract': r'Validity:\s*(.+?)\s*-\s*(.+?)(?:\n|$)',
+ 'edge_expired_extract': r'Expired:\s*(.+?)(?:\n|$)',
+
+ # ── Markdown cleanup ─────────────────────────────────────
+ 'markdown_heading': r'#{1,6}\s+',
+ 'markdown_tool_json': r'\{[^}]*tool_name[^}]*\}',
+ 'markdown_formatting': r'[*_`|>~\-]{2,}',
+    'question_number_line': r'Question\s*\d+[::]\s*',
+ 'bracket_label': r'\[[^\]]+\]',
+}
diff --git a/backend/app/i18n/zh.py b/backend/app/i18n/zh.py
new file mode 100644
index 000000000..66f290a58
--- /dev/null
+++ b/backend/app/i18n/zh.py
@@ -0,0 +1,1110 @@
+"""Chinese language pack (default)"""
+
+# ═══════════════════════════════════════════════════════════════
+# Large prompt templates
+# ═══════════════════════════════════════════════════════════════
+
+PROMPTS = {
+
+ # ── report_agent.py: Tool descriptions ──
+
+ 'report_tool_desc_insight': """\
+【深度洞察检索 - 强大的检索工具】
+这是我们强大的检索函数,专为深度分析设计。它会:
+1. 自动将你的问题分解为多个子问题
+2. 从多个维度检索模拟图谱中的信息
+3. 整合语义搜索、实体分析、关系链追踪的结果
+4. 返回最全面、最深度的检索内容
+
+【使用场景】
+- 需要深入分析某个话题
+- 需要了解事件的多个方面
+- 需要获取支撑报告章节的丰富素材
+
+【返回内容】
+- 相关事实原文(可直接引用)
+- 核心实体洞察
+- 关系链分析""",
+
+ 'report_tool_desc_panorama': """\
+【广度搜索 - 获取全貌视图】
+这个工具用于获取模拟结果的完整全貌,特别适合了解事件演变过程。它会:
+1. 获取所有相关节点和关系
+2. 区分当前有效的事实和历史/过期的事实
+3. 帮助你了解舆情是如何演变的
+
+【使用场景】
+- 需要了解事件的完整发展脉络
+- 需要对比不同阶段的舆情变化
+- 需要获取全面的实体和关系信息
+
+【返回内容】
+- 当前有效事实(模拟最新结果)
+- 历史/过期事实(演变记录)
+- 所有涉及的实体""",
+
+ 'report_tool_desc_quick': """\
+【简单搜索 - 快速检索】
+轻量级的快速检索工具,适合简单、直接的信息查询。
+
+【使用场景】
+- 需要快速查找某个具体信息
+- 需要验证某个事实
+- 简单的信息检索
+
+【返回内容】
+- 与查询最相关的事实列表""",
+
+ 'report_tool_desc_interview': """\
+【深度采访 - 真实Agent采访(双平台)】
+调用OASIS模拟环境的采访API,对正在运行的模拟Agent进行真实采访!
+这不是LLM模拟,而是调用真实的采访接口获取模拟Agent的原始回答。
+默认在Twitter和Reddit两个平台同时采访,获取更全面的观点。
+
+功能流程:
+1. 自动读取人设文件,了解所有模拟Agent
+2. 智能选择与采访主题最相关的Agent(如学生、媒体、官方等)
+3. 自动生成采访问题
+4. 调用 /api/simulation/interview/batch 接口在双平台进行真实采访
+5. 整合所有采访结果,提供多视角分析
+
+【使用场景】
+- 需要从不同角色视角了解事件看法(学生怎么看?媒体怎么看?官方怎么说?)
+- 需要收集多方意见和立场
+- 需要获取模拟Agent的真实回答(来自OASIS模拟环境)
+- 想让报告更生动,包含"采访实录"
+
+【返回内容】
+- 被采访Agent的身份信息
+- 各Agent在Twitter和Reddit两个平台的采访回答
+- 关键引言(可直接引用)
+- 采访摘要和观点对比
+
+【重要】需要OASIS模拟环境正在运行才能使用此功能!""",
+
+ # ── report_agent.py: Plan prompts ──
+
+ 'report_plan_system': """\
+你是一个「未来预测报告」的撰写专家,拥有对模拟世界的「上帝视角」——你可以洞察模拟中每一位Agent的行为、言论和互动。
+
+【核心理念】
+我们构建了一个模拟世界,并向其中注入了特定的「模拟需求」作为变量。模拟世界的演化结果,就是对未来可能发生情况的预测。你正在观察的不是"实验数据",而是"未来的预演"。
+
+【你的任务】
+撰写一份「未来预测报告」,回答:
+1. 在我们设定的条件下,未来发生了什么?
+2. 各类Agent(人群)是如何反应和行动?
+3. 这个模拟揭示了哪些值得关注的未来趋势和风险?
+
+【报告定位】
+- ✅ 这是一份基于模拟的未来预测报告,揭示"如果这样,未来会怎样"
+- ✅ 聚焦于预测结果:事件走向、群体反应、涌现现象、潜在风险
+- ✅ 模拟世界中的Agent言行就是对未来人群行为的预测
+- ❌ 不是对现实世界现状的分析
+- ❌ 不是泛泛而谈的舆情综述
+
+【章节数量限制】
+- 最少2个章节,最多5个章节
+- 不需要子章节,每个章节直接撰写完整内容
+- 内容要精炼,聚焦于核心预测发现
+- 章节结构由你根据预测结果自主设计
+
+请输出JSON格式的报告大纲,格式如下:
+{
+ "title": "报告标题",
+ "summary": "报告摘要(一句话概括核心预测发现)",
+ "sections": [
+ {
+ "title": "章节标题",
+ "description": "章节内容描述"
+ }
+ ]
+}
+
+注意:sections数组最少2个,最多5个元素!""",
+
+ 'report_plan_user': """\
+【预测场景设定】
+我们向模拟世界注入的变量(模拟需求):{simulation_requirement}
+
+【模拟世界规模】
+- 参与模拟的实体数量: {total_nodes}
+- 实体间产生的关系数量: {total_edges}
+- 实体类型分布: {entity_types}
+- 活跃Agent数量: {total_entities}
+
+【模拟预测到的部分未来事实样本】
+{related_facts_json}
+
+请以「上帝视角」审视这个未来预演:
+1. 在我们设定的条件下,未来呈现出了什么样的状态?
+2. 各类人群(Agent)是如何反应和行动的?
+3. 这个模拟揭示了哪些值得关注的未来趋势?
+
+根据预测结果,设计最合适的报告章节结构。
+
+【再次提醒】报告章节数量:最少2个,最多5个,内容要精炼聚焦于核心预测发现。""",
+
+ # ── report_agent.py: Section prompts ──
+
+ 'report_section_system': """\
+你是一个「未来预测报告」的撰写专家,正在撰写报告的一个章节。
+
+报告标题: {report_title}
+报告摘要: {report_summary}
+预测场景(模拟需求): {simulation_requirement}
+
+当前要撰写的章节: {section_title}
+
+═══════════════════════════════════════════════════════════════
+【核心理念】
+═══════════════════════════════════════════════════════════════
+
+模拟世界是对未来的预演。我们向模拟世界注入了特定条件(模拟需求),
+模拟中Agent的行为和互动,就是对未来人群行为的预测。
+
+你的任务是:
+- 揭示在设定条件下,未来发生了什么
+- 预测各类人群(Agent)是如何反应和行动的
+- 发现值得关注的未来趋势、风险和机会
+
+❌ 不要写成对现实世界现状的分析
+✅ 要聚焦于"未来会怎样"——模拟结果就是预测的未来
+
+═══════════════════════════════════════════════════════════════
+【最重要的规则 - 必须遵守】
+═══════════════════════════════════════════════════════════════
+
+1. 【必须调用工具观察模拟世界】
+ - 你正在以「上帝视角」观察未来的预演
+ - 所有内容必须来自模拟世界中发生的事件和Agent言行
+ - 禁止使用你自己的知识来编写报告内容
+ - 每个章节至少调用3次工具(最多5次)来观察模拟的世界,它代表了未来
+
+2. 【必须引用Agent的原始言行】
+ - Agent的发言和行为是对未来人群行为的预测
+ - 在报告中使用引用格式展示这些预测,例如:
+ > "某类人群会表示:原文内容..."
+ - 这些引用是模拟预测的核心证据
+
+3. 【语言一致性 - 引用内容必须翻译为报告语言】
+ - 工具返回的内容可能包含英文或中英文混杂的表述
+ - 如果模拟需求和材料原文是中文的,报告必须全部使用中文撰写
+ - 当你引用工具返回的英文或中英混杂内容时,必须将其翻译为流畅的中文后再写入报告
+ - 翻译时保持原意不变,确保表述自然通顺
+ - 这一规则同时适用于正文和引用块(> 格式)中的内容
+
+4. 【忠实呈现预测结果】
+ - 报告内容必须反映模拟世界中的代表未来的模拟结果
+ - 不要添加模拟中不存在的信息
+ - 如果某方面信息不足,如实说明
+
+═══════════════════════════════════════════════════════════════
+【⚠️ 格式规范 - 极其重要!】
+═══════════════════════════════════════════════════════════════
+
+【一个章节 = 最小内容单位】
+- 每个章节是报告的最小分块单位
+- ❌ 禁止在章节内使用任何 Markdown 标题(#、##、###、#### 等)
+- ❌ 禁止在内容开头添加章节主标题
+- ✅ 章节标题由系统自动添加,你只需撰写纯正文内容
+- ✅ 使用**粗体**、段落分隔、引用、列表来组织内容,但不要用标题
+
+【正确示例】
+```
+本章节分析了事件的舆论传播态势。通过对模拟数据的深入分析,我们发现...
+
+**首发引爆阶段**
+
+微博作为舆情的第一现场,承担了信息首发的核心功能:
+
+> "微博贡献了68%的首发声量..."
+
+**情绪放大阶段**
+
+抖音平台进一步放大了事件影响力:
+
+- 视觉冲击力强
+- 情绪共鸣度高
+```
+
+【错误示例】
+```
+## 执行摘要 ← 错误!不要添加任何标题
+### 一、首发阶段 ← 错误!不要用###分小节
+#### 1.1 详细分析 ← 错误!不要用####细分
+
+本章节分析了...
+```
+
+═══════════════════════════════════════════════════════════════
+【可用检索工具】(每章节调用3-5次)
+═══════════════════════════════════════════════════════════════
+
+{tools_description}
+
+【工具使用建议 - 请混合使用不同工具,不要只用一种】
+- insight_forge: 深度洞察分析,自动分解问题并多维度检索事实和关系
+- panorama_search: 广角全景搜索,了解事件全貌、时间线和演变过程
+- quick_search: 快速验证某个具体信息点
+- interview_agents: 采访模拟Agent,获取不同角色的第一人称观点和真实反应
+
+═══════════════════════════════════════════════════════════════
+【工作流程】
+═══════════════════════════════════════════════════════════════
+
+每次回复你只能做以下两件事之一(不可同时做):
+
+选项A - 调用工具:
+输出你的思考,然后用以下格式调用一个工具:
+
+<tool_call>
+{{"name": "工具名称", "parameters": {{"参数名": "参数值"}}}}
+</tool_call>
+
+系统会执行工具并把结果返回给你。你不需要也不能自己编写工具返回结果。
+
+选项B - 输出最终内容:
+当你已通过工具获取了足够信息,以 "Final Answer:" 开头输出章节内容。
+
+⚠️ 严格禁止:
+- 禁止在一次回复中同时包含工具调用和 Final Answer
+- 禁止自己编造工具返回结果(Observation),所有工具结果由系统注入
+- 每次回复最多调用一个工具
+
+═══════════════════════════════════════════════════════════════
+【章节内容要求】
+═══════════════════════════════════════════════════════════════
+
+1. 内容必须基于工具检索到的模拟数据
+2. 大量引用原文来展示模拟效果
+3. 使用Markdown格式(但禁止使用标题):
+ - 使用 **粗体文字** 标记重点(代替子标题)
+ - 使用列表(-或1.2.3.)组织要点
+ - 使用空行分隔不同段落
+ - ❌ 禁止使用 #、##、###、#### 等任何标题语法
+4. 【引用格式规范 - 必须单独成段】
+ 引用必须独立成段,前后各有一个空行,不能混在段落中:
+
+ ✅ 正确格式:
+ ```
+ 校方的回应被认为缺乏实质内容。
+
+ > "校方的应对模式在瞬息万变的社交媒体环境中显得僵化和迟缓。"
+
+ 这一评价反映了公众的普遍不满。
+ ```
+
+ ❌ 错误格式:
+ ```
+ 校方的回应被认为缺乏实质内容。> "校方的应对模式..." 这一评价反映了...
+ ```
+5. 保持与其他章节的逻辑连贯性
+6. 【避免重复】仔细阅读下方已完成的章节内容,不要重复描述相同的信息
+7. 【再次强调】不要添加任何标题!用**粗体**代替小节标题""",
+
+ 'report_section_user': """\
+已完成的章节内容(请仔细阅读,避免重复):
+{previous_content}
+
+═══════════════════════════════════════════════════════════════
+【当前任务】撰写章节: {section_title}
+═══════════════════════════════════════════════════════════════
+
+【重要提醒】
+1. 仔细阅读上方已完成的章节,避免重复相同的内容!
+2. 开始前必须先调用工具获取模拟数据
+3. 请混合使用不同工具,不要只用一种
+4. 报告内容必须来自检索结果,不要使用自己的知识
+
+【⚠️ 格式警告 - 必须遵守】
+- ❌ 不要写任何标题(#、##、###、####都不行)
+- ❌ 不要写"{section_title}"作为开头
+- ✅ 章节标题由系统自动添加
+- ✅ 直接写正文,用**粗体**代替小节标题
+
+请开始:
+1. 首先思考(Thought)这个章节需要什么信息
+2. 然后调用工具(Action)获取模拟数据
+3. 收集足够信息后输出 Final Answer(纯正文,无任何标题)""",
+
+ # ── report_agent.py: ReACT loop messages ──
+
+ 'report_react_observation': """\
+Observation(检索结果):
+
+═══ 工具 {tool_name} 返回 ═══
+{result}
+
+═══════════════════════════════════════════════════════════════
+已调用工具 {tool_calls_count}/{max_tool_calls} 次(已用: {used_tools_str}){unused_hint}
+- 如果信息充分:以 "Final Answer:" 开头输出章节内容(必须引用上述原文)
+- 如果需要更多信息:调用一个工具继续检索
+═══════════════════════════════════════════════════════════════""",
+
+ 'report_react_insufficient': (
+ "【注意】你只调用了{tool_calls_count}次工具,至少需要{min_tool_calls}次。"
+ "请再调用工具获取更多模拟数据,然后再输出 Final Answer。{unused_hint}"
+ ),
+
+ 'report_react_insufficient_alt': (
+ "当前只调用了 {tool_calls_count} 次工具,至少需要 {min_tool_calls} 次。"
+ "请调用工具获取模拟数据。{unused_hint}"
+ ),
+
+ 'report_react_tool_limit': (
+ "工具调用次数已达上限({tool_calls_count}/{max_tool_calls}),不能再调用工具。"
+ '请立即基于已获取的信息,以 "Final Answer:" 开头输出章节内容。'
+ ),
+
+ 'report_react_unused_hint': "\n\U0001f4a1 你还没有使用过: {unused_list},建议尝试不同工具获取多角度信息",
+
+ 'report_react_force_final': "已达到工具调用限制,请直接输出 Final Answer: 并生成章节内容。",
+
+ # ── report_agent.py: Chat prompt ──
+
+ 'report_chat_system': """\
+你是一个简洁高效的模拟预测助手。
+
+【背景】
+预测条件: {simulation_requirement}
+
+【已生成的分析报告】
+{report_content}
+
+【规则】
+1. 优先基于上述报告内容回答问题
+2. 直接回答问题,避免冗长的思考论述
+3. 仅在报告内容不足以回答时,才调用工具检索更多数据
+4. 回答要简洁、清晰、有条理
+
+【可用工具】(仅在需要时使用,最多调用1-2次)
+{tools_description}
+
+【工具调用格式】
+
+<tool_call>
+{{"name": "工具名称", "parameters": {{"参数名": "参数值"}}}}
+</tool_call>
+
+
+【回答风格】
+- 简洁直接,不要长篇大论
+- 使用 > 格式引用关键内容
+- 优先给出结论,再解释原因""",
+
+ # ── ontology_generator.py ──
+
+ 'ontology_system': """\
+你是一个专业的知识图谱本体设计专家。你的任务是分析给定的文本内容和模拟需求,设计适合**社交媒体舆论模拟**的实体类型和关系类型。
+
+**重要:你必须输出有效的JSON格式数据,不要输出任何其他内容。**
+
+## 核心任务背景
+
+我们正在构建一个**社交媒体舆论模拟系统**。在这个系统中:
+- 每个实体都是一个可以在社交媒体上发声、互动、传播信息的"账号"或"主体"
+- 实体之间会相互影响、转发、评论、回应
+- 我们需要模拟舆论事件中各方的反应和信息传播路径
+
+因此,**实体必须是现实中真实存在的、可以在社媒上发声和互动的主体**:
+
+**可以是**:
+- 具体的个人(公众人物、当事人、意见领袖、专家学者、普通人)
+- 公司、企业(包括其官方账号)
+- 组织机构(大学、协会、NGO、工会等)
+- 政府部门、监管机构
+- 媒体机构(报纸、电视台、自媒体、网站)
+- 社交媒体平台本身
+- 特定群体代表(如校友会、粉丝团、维权群体等)
+
+**不可以是**:
+- 抽象概念(如"舆论"、"情绪"、"趋势")
+- 主题/话题(如"学术诚信"、"教育改革")
+- 观点/态度(如"支持方"、"反对方")
+
+## 输出格式
+
+请输出JSON格式,包含以下结构:
+
+```json
+{
+ "entity_types": [
+ {
+ "name": "实体类型名称(英文,PascalCase)",
+ "description": "简短描述(英文,不超过100字符)",
+ "attributes": [
+ {
+ "name": "属性名(英文,snake_case)",
+ "type": "text",
+ "description": "属性描述"
+ }
+ ],
+ "examples": ["示例实体1", "示例实体2"]
+ }
+ ],
+ "edge_types": [
+ {
+ "name": "关系类型名称(英文,UPPER_SNAKE_CASE)",
+ "description": "简短描述(英文,不超过100字符)",
+ "source_targets": [
+ {"source": "源实体类型", "target": "目标实体类型"}
+ ],
+ "attributes": []
+ }
+ ],
+ "analysis_summary": "对文本内容的简要分析说明(中文)"
+}
+```
+
+## 设计指南(极其重要!)
+
+### 1. 实体类型设计 - 必须严格遵守
+
+**数量要求:必须正好10个实体类型**
+
+**层次结构要求(必须同时包含具体类型和兜底类型)**:
+
+你的10个实体类型必须包含以下层次:
+
+A. **兜底类型(必须包含,放在列表最后2个)**:
+ - `Person`: 任何自然人个体的兜底类型。当一个人不属于其他更具体的人物类型时,归入此类。
+ - `Organization`: 任何组织机构的兜底类型。当一个组织不属于其他更具体的组织类型时,归入此类。
+
+B. **具体类型(8个,根据文本内容设计)**:
+ - 针对文本中出现的主要角色,设计更具体的类型
+ - 例如:如果文本涉及学术事件,可以有 `Student`, `Professor`, `University`
+ - 例如:如果文本涉及商业事件,可以有 `Company`, `CEO`, `Employee`
+
+**为什么需要兜底类型**:
+- 文本中会出现各种人物,如"中小学教师"、"路人甲"、"某位网友"
+- 如果没有专门的类型匹配,他们应该被归入 `Person`
+- 同理,小型组织、临时团体等应该归入 `Organization`
+
+**具体类型的设计原则**:
+- 从文本中识别出高频出现或关键的角色类型
+- 每个具体类型应该有明确的边界,避免重叠
+- description 必须清晰说明这个类型和兜底类型的区别
+
+### 2. 关系类型设计
+
+- 数量:6-10个
+- 关系应该反映社媒互动中的真实联系
+- 确保关系的 source_targets 涵盖你定义的实体类型
+
+### 3. 属性设计
+
+- 每个实体类型1-3个关键属性
+- **注意**:属性名不能使用 `name`、`uuid`、`group_id`、`created_at`、`summary`(这些是系统保留字)
+- 推荐使用:`full_name`, `title`, `role`, `position`, `location`, `description` 等
+
+## 实体类型参考
+
+**个人类(具体)**:
+- Student: 学生
+- Professor: 教授/学者
+- Journalist: 记者
+- Celebrity: 明星/网红
+- Executive: 高管
+- Official: 政府官员
+- Lawyer: 律师
+- Doctor: 医生
+
+**个人类(兜底)**:
+- Person: 任何自然人(不属于上述具体类型时使用)
+
+**组织类(具体)**:
+- University: 高校
+- Company: 公司企业
+- GovernmentAgency: 政府机构
+- MediaOutlet: 媒体机构
+- Hospital: 医院
+- School: 中小学
+- NGO: 非政府组织
+
+**组织类(兜底)**:
+- Organization: 任何组织机构(不属于上述具体类型时使用)
+
+## 关系类型参考
+
+- WORKS_FOR: 工作于
+- STUDIES_AT: 就读于
+- AFFILIATED_WITH: 隶属于
+- REPRESENTS: 代表
+- REGULATES: 监管
+- REPORTS_ON: 报道
+- COMMENTS_ON: 评论
+- RESPONDS_TO: 回应
+- SUPPORTS: 支持
+- OPPOSES: 反对
+- COLLABORATES_WITH: 合作
+- COMPETES_WITH: 竞争""",
+
+ # ── simulation_config_generator.py: Time config ──
+
+ 'sim_config_time': """\
+基于以下模拟需求,生成时间模拟配置。
+
+{context}
+
+## 任务
+请生成时间配置JSON。
+
+### 基本原则(仅供参考,需根据具体事件和参与群体灵活调整):
+- 用户群体为中国人,需符合北京时间作息习惯
+- 凌晨0-5点几乎无人活动(活跃度系数0.05)
+- 早上6-8点逐渐活跃(活跃度系数0.4)
+- 工作时间9-18点中等活跃(活跃度系数0.7)
+- 晚间19-22点是高峰期(活跃度系数1.5)
+- 23点后活跃度下降(活跃度系数0.5)
+- 一般规律:凌晨低活跃、早间渐增、工作时段中等、晚间高峰
+- **重要**:以下示例值仅供参考,你需要根据事件性质、参与群体特点来调整具体时段
+ - 例如:学生群体高峰可能是21-23点;媒体全天活跃;官方机构只在工作时间
+ - 例如:突发热点可能导致深夜也有讨论,off_peak_hours 可适当缩短
+
+### 返回JSON格式(不要markdown)
+
+示例:
+{{
+ "total_simulation_hours": 72,
+ "minutes_per_round": 60,
+ "agents_per_hour_min": 5,
+ "agents_per_hour_max": 50,
+ "peak_hours": [19, 20, 21, 22],
+ "off_peak_hours": [0, 1, 2, 3, 4, 5],
+ "morning_hours": [6, 7, 8],
+ "work_hours": [9, 10, 11, 12, 13, 14, 15, 16, 17, 18],
+ "reasoning": "针对该事件的时间配置说明"
+}}
+
+字段说明:
+- total_simulation_hours (int): 模拟总时长,24-168小时,突发事件短、持续话题长
+- minutes_per_round (int): 每轮时长,30-120分钟,建议60分钟
+- agents_per_hour_min (int): 每小时最少激活Agent数(取值范围: 1-{max_agents_allowed})
+- agents_per_hour_max (int): 每小时最多激活Agent数(取值范围: 1-{max_agents_allowed})
+- peak_hours (int数组): 高峰时段,根据事件参与群体调整
+- off_peak_hours (int数组): 低谷时段,通常深夜凌晨
+- morning_hours (int数组): 早间时段
+- work_hours (int数组): 工作时段
+- reasoning (string): 简要说明为什么这样配置""",
+
+ 'sim_config_time_system': "你是社交媒体模拟专家。返回纯JSON格式,时间配置需符合中国人作息习惯。",
+
+ # ── simulation_config_generator.py: Event config ──
+
+ 'sim_config_event': """\
+基于以下模拟需求,生成事件配置。
+
+模拟需求: {simulation_requirement}
+
+{context}
+
+## 可用实体类型及示例
+{type_info}
+
+## 任务
+请生成事件配置JSON:
+- 提取热点话题关键词
+- 描述舆论发展方向
+- 设计初始帖子内容,**每个帖子必须指定 poster_type(发布者类型)**
+
+**重要**: poster_type 必须从上面的"可用实体类型"中选择,这样初始帖子才能分配给合适的 Agent 发布。
+例如:官方声明应由 Official/University 类型发布,新闻由 MediaOutlet 发布,学生观点由 Student 发布。
+
+返回JSON格式(不要markdown):
+{{
+ "hot_topics": ["关键词1", "关键词2", ...],
+ "narrative_direction": "<舆论发展方向描述>",
+ "initial_posts": [
+ {{"content": "帖子内容", "poster_type": "实体类型(必须从可用类型中选择)"}},
+ ...
+ ],
+ "reasoning": "<简要说明>"
+}}""",
+
+ 'sim_config_event_system': "你是舆论分析专家。返回纯JSON格式。注意 poster_type 必须精确匹配可用实体类型。",
+
+ # ── simulation_config_generator.py: Agent config ──
+
+ 'sim_config_agent': """\
+基于以下信息,为每个实体生成社交媒体活动配置。
+
+模拟需求: {simulation_requirement}
+
+## 实体列表
+```json
+{entity_list_json}
+```
+
+## 任务
+为每个实体生成活动配置,注意:
+- **时间符合中国人作息**:凌晨0-5点几乎不活动,晚间19-22点最活跃
+- **官方机构**(University/GovernmentAgency):活跃度低(0.1-0.3),工作时间(9-17)活动,响应慢(60-240分钟),影响力高(2.5-3.0)
+- **媒体**(MediaOutlet):活跃度中(0.4-0.6),全天活动(8-23),响应快(5-30分钟),影响力高(2.0-2.5)
+- **个人**(Student/Person/Alumni):活跃度高(0.6-0.9),主要晚间活动(18-23),响应快(1-15分钟),影响力低(0.8-1.2)
+- **公众人物/专家**:活跃度中(0.4-0.6),影响力中高(1.5-2.0)
+
+返回JSON格式(不要markdown):
+{{
+ "agent_configs": [
+ {{
+ "agent_id": <必须与输入一致>,
+ "activity_level": <0.0-1.0>,
+ "posts_per_hour": <发帖频率>,
+ "comments_per_hour": <评论频率>,
+ "active_hours": [<活跃小时列表,考虑中国人作息>],
+ "response_delay_min": <最小响应延迟分钟>,
+ "response_delay_max": <最大响应延迟分钟>,
+ "sentiment_bias": <-1.0到1.0>,
+ "stance": "",
+ "influence_weight": <影响力权重>
+ }},
+ ...
+ ]
+}}""",
+
+ 'sim_config_agent_system': "你是社交媒体行为分析专家。返回纯JSON,配置需符合中国人作息习惯。",
+
+ # ── oasis_profile_generator.py: Profile prompts ──
+
+ 'profile_individual_system': "你是社交媒体用户画像生成专家。生成详细、真实的人设用于舆论模拟,最大程度还原已有现实情况。必须返回有效的JSON格式,所有字符串值不能包含未转义的换行符。使用中文。",
+
+ 'profile_individual_user': """\
+为实体生成详细的社交媒体用户人设,最大程度还原已有现实情况。
+
+实体名称: {entity_name}
+实体类型: {entity_type}
+实体摘要: {entity_summary}
+实体属性: {attrs_str}
+
+上下文信息:
+{context_str}
+
+请生成JSON,包含以下字段:
+
+1. bio: 社交媒体简介,200字
+2. persona: 详细人设描述(2000字的纯文本),需包含:
+ - 基本信息(年龄、职业、教育背景、所在地)
+ - 人物背景(重要经历、与事件的关联、社会关系)
+ - 性格特征(MBTI类型、核心性格、情绪表达方式)
+ - 社交媒体行为(发帖频率、内容偏好、互动风格、语言特点)
+ - 立场观点(对话题的态度、可能被激怒/感动的内容)
+ - 独特特征(口头禅、特殊经历、个人爱好)
+ - 个人记忆(人设的重要部分,要介绍这个个体与事件的关联,以及这个个体在事件中的已有动作与反应)
+3. age: 年龄数字(必须是整数)
+4. gender: 性别,必须是英文: "male" 或 "female"
+5. mbti: MBTI类型(如INTJ、ENFP等)
+6. country: 国家(使用中文,如"中国")
+7. profession: 职业
+8. interested_topics: 感兴趣话题数组
+
+重要:
+- 所有字段值必须是字符串或数字,不要使用换行符
+- persona必须是一段连贯的文字描述
+- 使用中文(除了gender字段必须用英文male/female)
+- 内容要与实体信息保持一致
+- age必须是有效的整数,gender必须是"male"或"female"
+""",
+
+ 'profile_institutional_system': "你是社交媒体用户画像生成专家。生成详细、真实的人设用于舆论模拟,最大程度还原已有现实情况。必须返回有效的JSON格式,所有字符串值不能包含未转义的换行符。使用中文。",
+
+ 'profile_institutional_user': """\
+为机构/群体实体生成详细的社交媒体账号设定,最大程度还原已有现实情况。
+
+实体名称: {entity_name}
+实体类型: {entity_type}
+实体摘要: {entity_summary}
+实体属性: {attrs_str}
+
+上下文信息:
+{context_str}
+
+请生成JSON,包含以下字段:
+
+1. bio: 官方账号简介,200字,专业得体
+2. persona: 详细账号设定描述(2000字的纯文本),需包含:
+ - 机构基本信息(正式名称、机构性质、成立背景、主要职能)
+ - 账号定位(账号类型、目标受众、核心功能)
+ - 发言风格(语言特点、常用表达、禁忌话题)
+ - 发布内容特点(内容类型、发布频率、活跃时间段)
+ - 立场态度(对核心话题的官方立场、面对争议的处理方式)
+ - 特殊说明(代表的群体画像、运营习惯)
+ - 机构记忆(机构人设的重要部分,要介绍这个机构与事件的关联,以及这个机构在事件中的已有动作与反应)
+3. age: 固定填30(机构账号的虚拟年龄)
+4. gender: 固定填"other"(机构账号使用other表示非个人)
+5. mbti: MBTI类型,用于描述账号风格,如ISTJ代表严谨保守
+6. country: 国家(使用中文,如"中国")
+7. profession: 机构职能描述
+8. interested_topics: 关注领域数组
+
+重要:
+- 所有字段值必须是字符串或数字,不允许null值
+- persona必须是一段连贯的文字描述,不要使用换行符
+- 使用中文(除了gender字段必须用英文"other")
+- age必须是整数30,gender必须是字符串"other"
+- 机构账号发言要符合其身份定位""",
+
+ # ── zep_tools.py: LLM prompts ──
+
+ 'zep_subquery_system': """\
+你是一个专业的问题分析专家。你的任务是将一个复杂问题分解为多个可以在模拟世界中独立观察的子问题。
+
+要求:
+1. 每个子问题应该足够具体,可以在模拟世界中找到相关的Agent行为或事件
+2. 子问题应该覆盖原问题的不同维度(如:谁、什么、为什么、怎么样、何时、何地)
+3. 子问题应该与模拟场景相关
+4. 返回JSON格式:{"sub_queries": ["子问题1", "子问题2", ...]}""",
+
+ 'zep_subquery_user': """模拟需求背景:
+{simulation_requirement}
+
+{report_context_line}
+
+请将以下问题分解为{max_queries}个子问题:
+{query}
+
+返回JSON格式的子问题列表。""",
+
+ 'zep_agent_selection_system': """\
+你是一个专业的采访策划专家。你的任务是根据采访需求,从模拟Agent列表中选择最适合采访的对象。
+
+选择标准:
+1. Agent的身份/职业与采访主题相关
+2. Agent可能持有独特或有价值的观点
+3. 选择多样化的视角(如:支持方、反对方、中立方、专业人士等)
+4. 优先选择与事件直接相关的角色
+
+返回JSON格式:
+{
+ "selected_indices": [选中Agent的索引列表],
+ "reasoning": "选择理由说明"
+}""",
+
+ 'zep_question_gen_system': """\
+你是一个专业的记者/采访者。根据采访需求,生成3-5个深度采访问题。
+
+问题要求:
+1. 开放性问题,鼓励详细回答
+2. 针对不同角色可能有不同答案
+3. 涵盖事实、观点、感受等多个维度
+4. 语言自然,像真实采访一样
+5. 每个问题控制在50字以内,简洁明了
+6. 直接提问,不要包含背景说明或前缀
+
+返回JSON格式:{"questions": ["问题1", "问题2", ...]}""",
+
+ 'zep_summary_system': """\
+你是一个专业的新闻编辑。请根据多位受访者的回答,生成一份采访摘要。
+
+摘要要求:
+1. 提炼各方主要观点
+2. 指出观点的共识和分歧
+3. 突出有价值的引言
+4. 客观中立,不偏袒任何一方
+5. 控制在1000字内
+
+格式约束(必须遵守):
+- 使用纯文本段落,用空行分隔不同部分
+- 不要使用Markdown标题(如#、##、###)
+- 不要使用分割线(如---、***)
+- 引用受访者原话时使用中文引号「」
+- 可以使用**加粗**标记关键词,但不要使用其他Markdown语法""",
+
+ # ── zep_tools.py: Interview prompt prefix ──
+
+ 'interview_prompt_prefix': (
+ "你正在接受一次采访。请结合你的人设、所有的过往记忆与行动,"
+ "以纯文本方式直接回答以下问题。\n"
+ "回复要求:\n"
+ "1. 直接用自然语言回答,不要调用任何工具\n"
+ "2. 不要返回JSON格式或工具调用格式\n"
+ "3. 不要使用Markdown标题(如#、##、###)\n"
+ "4. 按问题编号逐一回答,每个回答以「问题X:」开头(X为问题编号)\n"
+ "5. 每个问题的回答之间用空行分隔\n"
+ "6. 回答要有实质内容,每个问题至少回答2-3句话\n\n"
+ ),
+
+ # ── simulation.py: API interview prefix ──
+
+ 'interview_api_prefix': "结合你的人设、所有的过往记忆与行动,不调用任何工具直接用文本回复我:",
+}
+
+# ═══════════════════════════════════════════════════════════════
+# Format strings for to_text() methods
+# ═══════════════════════════════════════════════════════════════
+
+FORMATS = {
+ # ── SearchResult ──
+ 'search_query': '搜索查询: {query}',
+ 'search_results_found': '找到 {count} 条相关信息',
+ 'search_facts_header': '\n### 相关事实:',
+ 'search_edges_header': '\n### 相关边:',
+ 'search_nodes_header': '\n### 相关节点:',
+
+ # ── NodeInfo / EntityInfo ──
+ 'entity_unknown_type': '未知类型',
+ 'entity_format': '实体: {name} (类型: {type})',
+ 'entity_summary': '摘要: {summary}',
+
+ # ── EdgeInfo ──
+ 'edge_format': '关系: {source} --[{name}]--> {target}',
+ 'edge_fact': '事实: {fact}',
+ 'edge_unknown': '未知',
+ 'edge_until_now': '至今',
+ 'edge_validity': '时效: {valid_at} - {invalid_at}',
+ 'edge_expired': '已过期: {expired_at}',
+
+ # ── InsightForgeResult ──
+ 'insight_header': '## 未来预测深度分析',
+ 'insight_query': '分析问题: {query}',
+ 'insight_scenario': '预测场景: {requirement}',
+ 'insight_stats_header': '\n### 预测数据统计',
+ 'insight_facts_count': '相关预测事实: {count}条',
+ 'insight_entities_count': '涉及实体: {count}个',
+ 'insight_relations_count': '关系链: {count}条',
+ 'insight_subqueries_header': '\n### 分析的子问题',
+ 'insight_key_facts_header': '\n### 【关键事实】(请在报告中引用这些原文)',
+ 'insight_entities_header': '\n### 【核心实体】',
+ 'insight_entity_format': '- **{name}** ({type})',
+ 'insight_entity_summary': ' 摘要: \"{summary}\"',
+ 'insight_entity_facts': ' 相关事实: {count}条',
+ 'insight_relations_header': '\n### 【关系链】',
+
+ # ── PanoramaResult ──
+ 'panorama_header': '## 广度搜索结果(未来全景视图)',
+ 'panorama_query': '查询: {query}',
+ 'panorama_stats_header': '\n### 统计信息',
+ 'panorama_nodes': '- 总节点数: {count}',
+ 'panorama_edges': '- 总边数: {count}',
+ 'panorama_active': '- 当前有效事实: {count}条',
+ 'panorama_historical': '- 历史/过期事实: {count}条',
+ 'panorama_active_header': '\n### 【当前有效事实】(模拟结果原文)',
+ 'panorama_historical_header': '\n### 【历史/过期事实】(演变过程记录)',
+ 'panorama_entities_header': '\n### 【涉及实体】',
+
+ # ── InterviewResult ──
+ 'interview_header': '## 深度采访报告',
+ 'interview_topic': '**采访主题:** {topic}',
+ 'interview_count': '**采访人数:** {interviewed} / {total} 位模拟Agent',
+ 'interview_selection_header': '\n### 采访对象选择理由',
+ 'interview_auto_selection': '(自动选择)',
+ 'interview_records_header': '\n### 采访实录',
+ 'interview_entry': '\n#### 采访 #{index}: {name}',
+ 'interview_no_record': '(无采访记录)',
+ 'interview_summary_header': '\n### 采访摘要与核心观点',
+ 'interview_no_summary': '(无摘要)',
+
+ # ── AgentInterview ──
+ 'agent_bio': '_简介: {bio}_',
+ 'agent_key_quotes': '**关键引言:**',
+
+ # ── Interview response formatting ──
+ 'platform_no_response': '(该平台未获得回复)',
+ 'platform_twitter_header': '【Twitter平台回答】',
+ 'platform_reddit_header': '【Reddit平台回答】',
+
+ # ── Platform display names (from zep_graph_memory_updater.py) ──
+ 'platform_twitter': '世界1',
+ 'platform_reddit': '世界2',
+
+ # ── Agent activity descriptions (from zep_graph_memory_updater.py) ──
+ 'action_create_post_with_content': '发布了一条帖子:「{content}」',
+ 'action_create_post': '发布了一条帖子',
+ 'action_like_post_with_both': '点赞了{author}的帖子:「{content}」',
+ 'action_like_post_with_content': '点赞了一条帖子:「{content}」',
+ 'action_like_post_with_author': '点赞了{author}的一条帖子',
+ 'action_like_post': '点赞了一条帖子',
+ 'action_dislike_post_with_both': '踩了{author}的帖子:「{content}」',
+ 'action_dislike_post_with_content': '踩了一条帖子:「{content}」',
+ 'action_dislike_post_with_author': '踩了{author}的一条帖子',
+ 'action_dislike_post': '踩了一条帖子',
+ 'action_repost_with_both': '转发了{author}的帖子:「{content}」',
+ 'action_repost_with_content': '转发了一条帖子:「{content}」',
+ 'action_repost_with_author': '转发了{author}的一条帖子',
+ 'action_repost': '转发了一条帖子',
+ 'action_quote_with_both': '引用了{author}的帖子「{content}」',
+ 'action_quote_with_content': '引用了一条帖子「{content}」',
+ 'action_quote_with_author': '引用了{author}的一条帖子',
+ 'action_quote': '引用了一条帖子',
+ 'action_quote_comment': ',并评论道:「{content}」',
+ 'action_follow_with_name': '关注了用户「{name}」',
+ 'action_follow': '关注了一个用户',
+ 'action_comment_full': '在{author}的帖子「{post_content}」下评论道:「{content}」',
+ 'action_comment_with_content_only': '在帖子「{post_content}」下评论道:「{content}」',
+ 'action_comment_with_author': '在{author}的帖子下评论道:「{content}」',
+ 'action_comment_content': '评论道:「{content}」',
+ 'action_comment': '发表了评论',
+ 'action_like_comment_with_both': '点赞了{author}的评论:「{content}」',
+ 'action_like_comment_with_content': '点赞了一条评论:「{content}」',
+ 'action_like_comment_with_author': '点赞了{author}的一条评论',
+ 'action_like_comment': '点赞了一条评论',
+ 'action_dislike_comment_with_both': '踩了{author}的评论:「{content}」',
+ 'action_dislike_comment_with_content': '踩了一条评论:「{content}」',
+ 'action_dislike_comment_with_author': '踩了{author}的一条评论',
+ 'action_dislike_comment': '踩了一条评论',
+ 'action_search_with_query': '搜索了「{query}」',
+ 'action_search': '进行了搜索',
+ 'action_search_user_with_query': '搜索了用户「{query}」',
+ 'action_search_user': '搜索了用户',
+ 'action_mute_with_name': '屏蔽了用户「{name}」',
+ 'action_mute': '屏蔽了一个用户',
+ 'action_generic': '执行了{action_type}操作',
+}
+
+# ═══════════════════════════════════════════════════════════════
+# Short strings (fallbacks, errors, progress, UI)
+# ═══════════════════════════════════════════════════════════════
+
+STRINGS = {
+ # ── report_agent.py: fallback outline ──
+ 'report_default_title': '未来预测报告',
+ 'report_default_summary': '基于模拟预测的未来趋势与风险分析',
+ 'report_default_section1': '预测场景与核心发现',
+ 'report_default_section2': '人群行为预测分析',
+ 'report_default_section3': '趋势展望与风险提示',
+ 'report_fallback_title': '模拟分析报告',
+ 'report_first_section': '(这是第一个章节)',
+ 'report_empty_response': '(响应为空)',
+ 'report_continue': '请继续生成内容。',
+ 'report_no_report': '(暂无报告)',
+ 'report_truncated': '... [报告内容已截断] ...',
+ 'report_unused_tools_hint': '这些工具还未使用,推荐用一下它们: {tools}',
+ 'report_tool_result': '[{tool}结果]',
+ 'report_unknown_tool': '未知工具: {tool_name}。请使用以下工具之一: {available}',
+ 'report_tool_failed': '工具执行失败: {error}',
+ 'report_chat_observation_suffix': '\n\n请简洁回答问题。',
+ 'report_section_gen_failed': '(本章节生成失败:LLM 返回空响应,请稍后重试)',
+ 'report_conflict_format_error': (
+ "【格式错误】你在一次回复中同时包含了工具调用和 Final Answer,这是不允许的。\n"
+ "每次回复只能做以下两件事之一:\n"
+ "- 调用一个工具(输出一个工具调用块,不要写 Final Answer)\n"
+ "- 输出最终内容(以 'Final Answer:' 开头,不要包含工具调用)\n"
+ "请重新回复,只做其中一件事。"
+ ),
+
+ # ── report_agent.py: progress messages ──
+ 'progress_analyzing': '正在分析模拟需求...',
+ 'progress_generating_outline': '正在生成报告大纲...',
+ 'progress_parsing_outline': '正在解析大纲结构...',
+ 'progress_outline_done': '大纲规划完成',
+ 'progress_outline_sections': '大纲规划完成,共{count}个章节',
+ 'progress_generating_section': '正在生成章节: {title} ({num}/{total})',
+ 'progress_section_done': '章节 {title} 已完成',
+ 'progress_assembling': '正在组装完整报告...',
+ 'progress_report_done': '报告生成完成',
+ 'progress_report_failed': '报告生成失败: {error}',
+ 'progress_deep_search': '深度检索与撰写中 ({count}/{max})',
+ 'progress_init_report': '初始化报告...',
+ 'progress_start_outline': '开始规划报告大纲...',
+
+ # ── report_agent.py: tool parameter descriptions ──
+ 'param_insight_query': '你想深入分析的问题或话题',
+ 'param_insight_context': '当前报告章节的上下文(可选,有助于生成更精准的子问题)',
+ 'param_panorama_query': '搜索查询,用于相关性排序',
+ 'param_panorama_expired': '是否包含过期/历史内容(默认True)',
+ 'param_quick_query': '搜索查询字符串',
+ 'param_quick_limit': '返回结果数量(可选,默认10)',
+ 'param_interview_topic': "采访主题或需求描述(如:'了解学生对宿舍甲醛事件的看法')",
+ 'param_interview_max': '最多采访的Agent数量(可选,默认5,最大10)',
+ 'tools_available_prefix': '可用工具:',
+ 'tools_params_prefix': ' 参数: ',
+
+ # ── zep_tools.py: interview fallbacks ──
+ 'interview_no_profiles': '未找到可采访的Agent人设文件',
+ 'interview_api_failed': '采访API调用失败:{error}。请检查OASIS模拟环境状态。',
+ 'interview_failed': '采访失败:{error}。模拟环境可能已关闭,请确保OASIS环境正在运行。',
+ 'interview_error': '采访过程发生错误:{error}',
+ 'interview_auto_reason': '基于相关性自动选择',
+ 'interview_default_strategy': '使用默认选择策略',
+ 'interview_default_q1': '关于{topic},您有什么看法?',
+ 'interview_default_q2': '关于{topic},您的观点是什么?',
+ 'interview_default_q3': '这件事对您或您所代表的群体有什么影响?',
+ 'interview_default_q4': '您认为应该如何解决或改进这个问题?',
+ 'interview_none_completed': '未完成任何采访',
+ 'interview_summary_prefix': '共采访了{count}位受访者,包括:',
+
+ # ── zep_tools.py: sub-query fallbacks ──
+ 'subquery_main_participants': '{query} 的主要参与者',
+ 'subquery_causes_effects': '{query} 的原因和影响',
+ 'subquery_development': '{query} 的发展过程',
+
+ # ── oasis_profile_generator.py: fallbacks ──
+ 'profile_default_country': '中国',
+ 'profile_persona_fallback': '{name}是一个{type}。',
+ 'profile_zep_search_query': '关于{entity_name}的所有信息、活动、事件、关系和背景',
+
+ # ── ontology_generator.py: user message building ──
+ 'ontology_user_sim_req': '## 模拟需求',
+ 'ontology_user_doc_content': '## 文档内容',
+ 'ontology_user_extra': '## 额外说明',
+ 'ontology_user_instruction': (
+ "请根据以上内容,设计适合社会舆论模拟的实体类型和关系类型。\n\n"
+ "**必须遵守的规则**:\n"
+ "1. 必须正好输出10个实体类型\n"
+ "2. 最后2个必须是兜底类型:Person(个人兜底)和 Organization(组织兜底)\n"
+ "3. 前8个是根据文本内容设计的具体类型\n"
+ "4. 所有实体类型必须是现实中可以发声的主体,不能是抽象概念\n"
+ "5. 属性名不能使用 name、uuid、group_id 等保留字,用 full_name、org_name 等替代"
+ ),
+ 'ontology_text_truncated': '\n\n...(原文共{original_length}字,已截取前{max_length}字用于本体分析)...',
+
+ # ── simulation_config_generator.py: context building ──
+ 'sim_config_context_req': '## 模拟需求',
+ 'sim_config_context_entities': '## 实体信息 ({count}个)',
+ 'sim_config_context_docs': '## 原始文档内容',
+ 'sim_config_entity_header': '\n### {type} ({count}个)',
+ 'sim_config_entity_more': '... 还有 {count} 个',
+ 'sim_config_doc_truncated': '\n...(文档已截断)',
+ 'sim_config_default_time_reasoning': '使用默认中国人作息配置(每轮1小时)',
+ 'sim_config_default_event_reasoning': '使用默认配置',
+}
+
+# ═══════════════════════════════════════════════════════════════
+# Regex patterns for frontend parsing (must match FORMATS above)
+# ═══════════════════════════════════════════════════════════════
+
+PATTERNS = {
+ # ── InsightForge parsing ──
+ 'insight_query': r'分析问题:\s*(.+?)(?:\n|$)',
+ 'insight_scenario': r'预测场景:\s*(.+?)(?:\n|$)',
+ 'insight_facts_count': r'相关预测事实:\s*(\d+)',
+ 'insight_entities_count': r'涉及实体:\s*(\d+)',
+ 'insight_relations_count': r'关系链:\s*(\d+)',
+ 'insight_subqueries': r'### 分析的子问题\n([\s\S]*?)(?=\n###|$)',
+ 'insight_key_facts': r'### 【关键事实】[\s\S]*?\n([\s\S]*?)(?=\n###|$)',
+ 'insight_entities': r'### 【核心实体】\n([\s\S]*?)(?=\n###|$)',
+ 'insight_entity_summary': r'摘要:\s*"?(.+?)"?(?:\n|$)',
+ 'insight_entity_facts': r'相关事实:\s*(\d+)',
+ 'insight_relations': r'### 【关系链】\n([\s\S]*?)(?=\n###|$)',
+
+ # ── Panorama parsing ──
+ 'panorama_query': r'查询:\s*(.+?)(?:\n|$)',
+ 'panorama_nodes': r'总节点数:\s*(\d+)',
+ 'panorama_edges': r'总边数:\s*(\d+)',
+ 'panorama_active': r'当前有效事实:\s*(\d+)',
+ 'panorama_historical': r'历史\/过期事实:\s*(\d+)',
+ 'panorama_active_section': r'### 【当前有效事实】[\s\S]*?\n([\s\S]*?)(?=\n###|$)',
+ 'panorama_historical_section': r'### 【历史\/过期事实】[\s\S]*?\n([\s\S]*?)(?=\n###|$)',
+ 'panorama_entities_section': r'### 【涉及实体】\n([\s\S]*?)(?=\n###|$)',
+
+ # ── Interview parsing ──
+ 'interview_topic': r'\*\*采访主题:\*\*\s*(.+?)(?:\n|$)',
+ 'interview_count': r'\*\*采访人数:\*\*\s*(\d+)\s*\/\s*(\d+)',
+ 'interview_selection': r'### 采访对象选择理由\n([\s\S]*?)(?=\n---\n|\n### 采访实录)',
+ 'interview_split': r'#### 采访 #\d+:',
+ 'interview_bio': r'_简介:\s*([\s\S]*?)_\n',
+ 'interview_twitter': r'【Twitter平台回答】\n?([\s\S]*?)(?=【Reddit平台回答】|$)',
+ 'interview_reddit': r'【Reddit平台回答】\n?([\s\S]*?)$',
+ 'interview_quotes': r'\*\*关键引言:\*\*\n([\s\S]*?)(?=\n---|\n####|$)',
+ 'interview_summary': r'### 采访摘要与核心观点\n([\s\S]*?)$',
+
+ # ── QuickSearch parsing ──
+ 'quick_query': r'搜索查询:\s*(.+?)(?:\n|$)',
+ 'quick_count': r'找到\s*(\d+)\s*条',
+ 'quick_facts': r'### 相关事实:\n([\s\S]*)$',
+ 'quick_edges': r'### 相关边:\n([\s\S]*?)(?=\n###|$)',
+ 'quick_nodes': r'### 相关节点:\n([\s\S]*?)(?=\n###|$)',
+
+ # ── Other ──
+ 'final_answer': r'(?:Final Answer|最终答案)[::]\s*\n*([\s\S]*)$',
+ 'question_split': r'(?:^|[\r\n]+)问题(\d+)[::]\s*',
+ 'question_strip': r'^问题\d+[::]\s*',
+ 'platform_no_response': '(该平台未获得回复)',
+ 'no_reply': '[无回复]',
+}
diff --git a/backend/app/services/oasis_profile_generator.py b/backend/app/services/oasis_profile_generator.py
index 57836c539..3e004ce7b 100644
--- a/backend/app/services/oasis_profile_generator.py
+++ b/backend/app/services/oasis_profile_generator.py
@@ -20,6 +20,7 @@
from ..config import Config
from ..utils.logger import get_logger
+from ..i18n import get_prompt, get_format, get_string
from .zep_entity_reader import EntityNode, ZepEntityReader
logger = get_logger('mirofish.oasis_profile')
@@ -257,7 +258,7 @@ def generate_profile_from_entity(
user_name=user_name,
name=name,
bio=profile_data.get("bio", f"{entity_type}: {name}"),
- persona=profile_data.get("persona", entity.summary or f"A {entity_type} named {name}."),
+ persona=profile_data.get("persona", entity.summary or get_string('profile_persona_fallback', name=name, type=entity_type)),
karma=profile_data.get("karma", random.randint(500, 5000)),
friend_count=profile_data.get("friend_count", random.randint(50, 500)),
follower_count=profile_data.get("follower_count", random.randint(100, 1000)),
@@ -313,7 +314,7 @@ def _search_zep_for_entity(self, entity: EntityNode) -> Dict[str, Any]:
logger.debug(f"跳过Zep检索:未设置graph_id")
return results
- comprehensive_query = f"关于{entity_name}的所有信息、活动、事件、关系和背景"
+ comprehensive_query = get_string('profile_zep_search_query', entity_name=entity_name)
def search_edges():
"""搜索边(事实/关系)- 带重试机制"""
@@ -553,7 +554,7 @@ def _generate_profile_with_llm(
if "bio" not in result or not result["bio"]:
result["bio"] = entity_summary[:200] if entity_summary else f"{entity_type}: {entity_name}"
if "persona" not in result or not result["persona"]:
- result["persona"] = entity_summary or f"{entity_name}是一个{entity_type}。"
+ result["persona"] = entity_summary or get_string('profile_persona_fallback', name=entity_name, type=entity_type)
return result
@@ -650,7 +651,7 @@ def fix_string_newlines(match):
persona_match = re.search(r'"persona"\s*:\s*"([^"]*)', content) # 可能被截断
bio = bio_match.group(1) if bio_match else (entity_summary[:200] if entity_summary else f"{entity_type}: {entity_name}")
- persona = persona_match.group(1) if persona_match else (entity_summary or f"{entity_name}是一个{entity_type}。")
+ persona = persona_match.group(1) if persona_match else (entity_summary or get_string('profile_persona_fallback', name=entity_name, type=entity_type))
# 如果提取到了有意义的内容,标记为已修复
if bio_match or persona_match:
@@ -665,13 +666,12 @@ def fix_string_newlines(match):
logger.warning(f"JSON修复失败,返回基础结构")
return {
"bio": entity_summary[:200] if entity_summary else f"{entity_type}: {entity_name}",
- "persona": entity_summary or f"{entity_name}是一个{entity_type}。"
+ "persona": entity_summary or get_string('profile_persona_fallback', name=entity_name, type=entity_type)
}
def _get_system_prompt(self, is_individual: bool) -> str:
"""获取系统提示词"""
- base_prompt = "你是社交媒体用户画像生成专家。生成详细、真实的人设用于舆论模拟,最大程度还原已有现实情况。必须返回有效的JSON格式,所有字符串值不能包含未转义的换行符。使用中文。"
- return base_prompt
+ return get_prompt('profile_individual_system' if is_individual else 'profile_institutional_system')
def _build_individual_persona_prompt(
self,
@@ -686,41 +686,13 @@ def _build_individual_persona_prompt(
attrs_str = json.dumps(entity_attributes, ensure_ascii=False) if entity_attributes else "无"
context_str = context[:3000] if context else "无额外上下文"
- return f"""为实体生成详细的社交媒体用户人设,最大程度还原已有现实情况。
-
-实体名称: {entity_name}
-实体类型: {entity_type}
-实体摘要: {entity_summary}
-实体属性: {attrs_str}
-
-上下文信息:
-{context_str}
-
-请生成JSON,包含以下字段:
-
-1. bio: 社交媒体简介,200字
-2. persona: 详细人设描述(2000字的纯文本),需包含:
- - 基本信息(年龄、职业、教育背景、所在地)
- - 人物背景(重要经历、与事件的关联、社会关系)
- - 性格特征(MBTI类型、核心性格、情绪表达方式)
- - 社交媒体行为(发帖频率、内容偏好、互动风格、语言特点)
- - 立场观点(对话题的态度、可能被激怒/感动的内容)
- - 独特特征(口头禅、特殊经历、个人爱好)
- - 个人记忆(人设的重要部分,要介绍这个个体与事件的关联,以及这个个体在事件中的已有动作与反应)
-3. age: 年龄数字(必须是整数)
-4. gender: 性别,必须是英文: "male" 或 "female"
-5. mbti: MBTI类型(如INTJ、ENFP等)
-6. country: 国家(使用中文,如"中国")
-7. profession: 职业
-8. interested_topics: 感兴趣话题数组
-
-重要:
-- 所有字段值必须是字符串或数字,不要使用换行符
-- persona必须是一段连贯的文字描述
-- 使用中文(除了gender字段必须用英文male/female)
-- 内容要与实体信息保持一致
-- age必须是有效的整数,gender必须是"male"或"female"
-"""
+ return get_prompt('profile_individual_user').format(
+ entity_name=entity_name,
+ entity_type=entity_type,
+ entity_summary=entity_summary,
+ attrs_str=attrs_str,
+ context_str=context_str
+ )
def _build_group_persona_prompt(
self,
@@ -735,40 +707,13 @@ def _build_group_persona_prompt(
attrs_str = json.dumps(entity_attributes, ensure_ascii=False) if entity_attributes else "无"
context_str = context[:3000] if context else "无额外上下文"
- return f"""为机构/群体实体生成详细的社交媒体账号设定,最大程度还原已有现实情况。
-
-实体名称: {entity_name}
-实体类型: {entity_type}
-实体摘要: {entity_summary}
-实体属性: {attrs_str}
-
-上下文信息:
-{context_str}
-
-请生成JSON,包含以下字段:
-
-1. bio: 官方账号简介,200字,专业得体
-2. persona: 详细账号设定描述(2000字的纯文本),需包含:
- - 机构基本信息(正式名称、机构性质、成立背景、主要职能)
- - 账号定位(账号类型、目标受众、核心功能)
- - 发言风格(语言特点、常用表达、禁忌话题)
- - 发布内容特点(内容类型、发布频率、活跃时间段)
- - 立场态度(对核心话题的官方立场、面对争议的处理方式)
- - 特殊说明(代表的群体画像、运营习惯)
- - 机构记忆(机构人设的重要部分,要介绍这个机构与事件的关联,以及这个机构在事件中的已有动作与反应)
-3. age: 固定填30(机构账号的虚拟年龄)
-4. gender: 固定填"other"(机构账号使用other表示非个人)
-5. mbti: MBTI类型,用于描述账号风格,如ISTJ代表严谨保守
-6. country: 国家(使用中文,如"中国")
-7. profession: 机构职能描述
-8. interested_topics: 关注领域数组
-
-重要:
-- 所有字段值必须是字符串或数字,不允许null值
-- persona必须是一段连贯的文字描述,不要使用换行符
-- 使用中文(除了gender字段必须用英文"other")
-- age必须是整数30,gender必须是字符串"other"
-- 机构账号发言要符合其身份定位"""
+ return get_prompt('profile_institutional_user').format(
+ entity_name=entity_name,
+ entity_type=entity_type,
+ entity_summary=entity_summary,
+ attrs_str=attrs_str,
+ context_str=context_str
+ )
def _generate_profile_rule_based(
self,
diff --git a/backend/app/services/ontology_generator.py b/backend/app/services/ontology_generator.py
index 2d3e39bd8..54814875f 100644
--- a/backend/app/services/ontology_generator.py
+++ b/backend/app/services/ontology_generator.py
@@ -6,153 +6,11 @@
import json
from typing import Dict, Any, List, Optional
from ..utils.llm_client import LLMClient
+from ..i18n import get_prompt, get_format, get_string
-# 本体生成的系统提示词
-ONTOLOGY_SYSTEM_PROMPT = """你是一个专业的知识图谱本体设计专家。你的任务是分析给定的文本内容和模拟需求,设计适合**社交媒体舆论模拟**的实体类型和关系类型。
-
-**重要:你必须输出有效的JSON格式数据,不要输出任何其他内容。**
-
-## 核心任务背景
-
-我们正在构建一个**社交媒体舆论模拟系统**。在这个系统中:
-- 每个实体都是一个可以在社交媒体上发声、互动、传播信息的"账号"或"主体"
-- 实体之间会相互影响、转发、评论、回应
-- 我们需要模拟舆论事件中各方的反应和信息传播路径
-
-因此,**实体必须是现实中真实存在的、可以在社媒上发声和互动的主体**:
-
-**可以是**:
-- 具体的个人(公众人物、当事人、意见领袖、专家学者、普通人)
-- 公司、企业(包括其官方账号)
-- 组织机构(大学、协会、NGO、工会等)
-- 政府部门、监管机构
-- 媒体机构(报纸、电视台、自媒体、网站)
-- 社交媒体平台本身
-- 特定群体代表(如校友会、粉丝团、维权群体等)
-
-**不可以是**:
-- 抽象概念(如"舆论"、"情绪"、"趋势")
-- 主题/话题(如"学术诚信"、"教育改革")
-- 观点/态度(如"支持方"、"反对方")
-
-## 输出格式
-
-请输出JSON格式,包含以下结构:
-
-```json
-{
- "entity_types": [
- {
- "name": "实体类型名称(英文,PascalCase)",
- "description": "简短描述(英文,不超过100字符)",
- "attributes": [
- {
- "name": "属性名(英文,snake_case)",
- "type": "text",
- "description": "属性描述"
- }
- ],
- "examples": ["示例实体1", "示例实体2"]
- }
- ],
- "edge_types": [
- {
- "name": "关系类型名称(英文,UPPER_SNAKE_CASE)",
- "description": "简短描述(英文,不超过100字符)",
- "source_targets": [
- {"source": "源实体类型", "target": "目标实体类型"}
- ],
- "attributes": []
- }
- ],
- "analysis_summary": "对文本内容的简要分析说明(中文)"
-}
-```
-
-## 设计指南(极其重要!)
-
-### 1. 实体类型设计 - 必须严格遵守
-
-**数量要求:必须正好10个实体类型**
-
-**层次结构要求(必须同时包含具体类型和兜底类型)**:
-
-你的10个实体类型必须包含以下层次:
-
-A. **兜底类型(必须包含,放在列表最后2个)**:
- - `Person`: 任何自然人个体的兜底类型。当一个人不属于其他更具体的人物类型时,归入此类。
- - `Organization`: 任何组织机构的兜底类型。当一个组织不属于其他更具体的组织类型时,归入此类。
-
-B. **具体类型(8个,根据文本内容设计)**:
- - 针对文本中出现的主要角色,设计更具体的类型
- - 例如:如果文本涉及学术事件,可以有 `Student`, `Professor`, `University`
- - 例如:如果文本涉及商业事件,可以有 `Company`, `CEO`, `Employee`
-
-**为什么需要兜底类型**:
-- 文本中会出现各种人物,如"中小学教师"、"路人甲"、"某位网友"
-- 如果没有专门的类型匹配,他们应该被归入 `Person`
-- 同理,小型组织、临时团体等应该归入 `Organization`
-
-**具体类型的设计原则**:
-- 从文本中识别出高频出现或关键的角色类型
-- 每个具体类型应该有明确的边界,避免重叠
-- description 必须清晰说明这个类型和兜底类型的区别
-
-### 2. 关系类型设计
-
-- 数量:6-10个
-- 关系应该反映社媒互动中的真实联系
-- 确保关系的 source_targets 涵盖你定义的实体类型
-
-### 3. 属性设计
-
-- 每个实体类型1-3个关键属性
-- **注意**:属性名不能使用 `name`、`uuid`、`group_id`、`created_at`、`summary`(这些是系统保留字)
-- 推荐使用:`full_name`, `title`, `role`, `position`, `location`, `description` 等
-
-## 实体类型参考
-
-**个人类(具体)**:
-- Student: 学生
-- Professor: 教授/学者
-- Journalist: 记者
-- Celebrity: 明星/网红
-- Executive: 高管
-- Official: 政府官员
-- Lawyer: 律师
-- Doctor: 医生
-
-**个人类(兜底)**:
-- Person: 任何自然人(不属于上述具体类型时使用)
-
-**组织类(具体)**:
-- University: 高校
-- Company: 公司企业
-- GovernmentAgency: 政府机构
-- MediaOutlet: 媒体机构
-- Hospital: 医院
-- School: 中小学
-- NGO: 非政府组织
-
-**组织类(兜底)**:
-- Organization: 任何组织机构(不属于上述具体类型时使用)
-
-## 关系类型参考
-
-- WORKS_FOR: 工作于
-- STUDIES_AT: 就读于
-- AFFILIATED_WITH: 隶属于
-- REPRESENTS: 代表
-- REGULATES: 监管
-- REPORTS_ON: 报道
-- COMMENTS_ON: 评论
-- RESPONDS_TO: 回应
-- SUPPORTS: 支持
-- OPPOSES: 反对
-- COLLABORATES_WITH: 合作
-- COMPETES_WITH: 竞争
-"""
+# 本体生成的系统提示词(从 i18n 加载)
+ONTOLOGY_SYSTEM_PROMPT = get_prompt('ontology_system')
class OntologyGenerator:
@@ -223,34 +81,14 @@ def _build_user_message(
# 如果文本超过5万字,截断(仅影响传给LLM的内容,不影响图谱构建)
if len(combined_text) > self.MAX_TEXT_LENGTH_FOR_LLM:
combined_text = combined_text[:self.MAX_TEXT_LENGTH_FOR_LLM]
- combined_text += f"\n\n...(原文共{original_length}字,已截取前{self.MAX_TEXT_LENGTH_FOR_LLM}字用于本体分析)..."
-
- message = f"""## 模拟需求
+ combined_text += get_string('ontology_text_truncated', original_length=original_length, max_length=self.MAX_TEXT_LENGTH_FOR_LLM)
-{simulation_requirement}
+ message = f"{get_string('ontology_user_sim_req')}\n\n{simulation_requirement}\n\n{get_string('ontology_user_doc_content')}\n\n{combined_text}\n"
-## 文档内容
-
-{combined_text}
-"""
-
if additional_context:
- message += f"""
-## 额外说明
+ message += f"\n{get_string('ontology_user_extra')}\n\n{additional_context}\n"
-{additional_context}
-"""
-
- message += """
-请根据以上内容,设计适合社会舆论模拟的实体类型和关系类型。
-
-**必须遵守的规则**:
-1. 必须正好输出10个实体类型
-2. 最后2个必须是兜底类型:Person(个人兜底)和 Organization(组织兜底)
-3. 前8个是根据文本内容设计的具体类型
-4. 所有实体类型必须是现实中可以发声的主体,不能是抽象概念
-5. 属性名不能使用 name、uuid、group_id 等保留字,用 full_name、org_name 等替代
-"""
+ message += f"\n{get_string('ontology_user_instruction')}\n"
return message
diff --git a/backend/app/services/report_agent.py b/backend/app/services/report_agent.py
index 02ca5bdc2..cb1333dc4 100644
--- a/backend/app/services/report_agent.py
+++ b/backend/app/services/report_agent.py
@@ -21,10 +21,11 @@
from ..config import Config
from ..utils.llm_client import LLMClient
from ..utils.logger import get_logger
+from ..i18n import get_prompt, get_format, get_string
from .zep_tools import (
- ZepToolsService,
- SearchResult,
- InsightForgeResult,
+ ZepToolsService,
+ SearchResult,
+ InsightForgeResult,
PanoramaResult,
InterviewResult
)
@@ -467,393 +468,48 @@ def to_dict(self) -> Dict[str, Any]:
# ═══════════════════════════════════════════════════════════════
-# Prompt 模板常量
+# Prompt 模板常量(从 i18n 加载)
# ═══════════════════════════════════════════════════════════════
# ── 工具描述 ──
-TOOL_DESC_INSIGHT_FORGE = """\
-【深度洞察检索 - 强大的检索工具】
-这是我们强大的检索函数,专为深度分析设计。它会:
-1. 自动将你的问题分解为多个子问题
-2. 从多个维度检索模拟图谱中的信息
-3. 整合语义搜索、实体分析、关系链追踪的结果
-4. 返回最全面、最深度的检索内容
-
-【使用场景】
-- 需要深入分析某个话题
-- 需要了解事件的多个方面
-- 需要获取支撑报告章节的丰富素材
-
-【返回内容】
-- 相关事实原文(可直接引用)
-- 核心实体洞察
-- 关系链分析"""
-
-TOOL_DESC_PANORAMA_SEARCH = """\
-【广度搜索 - 获取全貌视图】
-这个工具用于获取模拟结果的完整全貌,特别适合了解事件演变过程。它会:
-1. 获取所有相关节点和关系
-2. 区分当前有效的事实和历史/过期的事实
-3. 帮助你了解舆情是如何演变的
-
-【使用场景】
-- 需要了解事件的完整发展脉络
-- 需要对比不同阶段的舆情变化
-- 需要获取全面的实体和关系信息
-
-【返回内容】
-- 当前有效事实(模拟最新结果)
-- 历史/过期事实(演变记录)
-- 所有涉及的实体"""
-
-TOOL_DESC_QUICK_SEARCH = """\
-【简单搜索 - 快速检索】
-轻量级的快速检索工具,适合简单、直接的信息查询。
-
-【使用场景】
-- 需要快速查找某个具体信息
-- 需要验证某个事实
-- 简单的信息检索
-
-【返回内容】
-- 与查询最相关的事实列表"""
-
-TOOL_DESC_INTERVIEW_AGENTS = """\
-【深度采访 - 真实Agent采访(双平台)】
-调用OASIS模拟环境的采访API,对正在运行的模拟Agent进行真实采访!
-这不是LLM模拟,而是调用真实的采访接口获取模拟Agent的原始回答。
-默认在Twitter和Reddit两个平台同时采访,获取更全面的观点。
-
-功能流程:
-1. 自动读取人设文件,了解所有模拟Agent
-2. 智能选择与采访主题最相关的Agent(如学生、媒体、官方等)
-3. 自动生成采访问题
-4. 调用 /api/simulation/interview/batch 接口在双平台进行真实采访
-5. 整合所有采访结果,提供多视角分析
-
-【使用场景】
-- 需要从不同角色视角了解事件看法(学生怎么看?媒体怎么看?官方怎么说?)
-- 需要收集多方意见和立场
-- 需要获取模拟Agent的真实回答(来自OASIS模拟环境)
-- 想让报告更生动,包含"采访实录"
-
-【返回内容】
-- 被采访Agent的身份信息
-- 各Agent在Twitter和Reddit两个平台的采访回答
-- 关键引言(可直接引用)
-- 采访摘要和观点对比
-
-【重要】需要OASIS模拟环境正在运行才能使用此功能!"""
+TOOL_DESC_INSIGHT_FORGE = get_prompt('report_tool_desc_insight')
-# ── 大纲规划 prompt ──
-
-PLAN_SYSTEM_PROMPT = """\
-你是一个「未来预测报告」的撰写专家,拥有对模拟世界的「上帝视角」——你可以洞察模拟中每一位Agent的行为、言论和互动。
-
-【核心理念】
-我们构建了一个模拟世界,并向其中注入了特定的「模拟需求」作为变量。模拟世界的演化结果,就是对未来可能发生情况的预测。你正在观察的不是"实验数据",而是"未来的预演"。
-
-【你的任务】
-撰写一份「未来预测报告」,回答:
-1. 在我们设定的条件下,未来发生了什么?
-2. 各类Agent(人群)是如何反应和行动?
-3. 这个模拟揭示了哪些值得关注的未来趋势和风险?
-
-【报告定位】
-- ✅ 这是一份基于模拟的未来预测报告,揭示"如果这样,未来会怎样"
-- ✅ 聚焦于预测结果:事件走向、群体反应、涌现现象、潜在风险
-- ✅ 模拟世界中的Agent言行就是对未来人群行为的预测
-- ❌ 不是对现实世界现状的分析
-- ❌ 不是泛泛而谈的舆情综述
-
-【章节数量限制】
-- 最少2个章节,最多5个章节
-- 不需要子章节,每个章节直接撰写完整内容
-- 内容要精炼,聚焦于核心预测发现
-- 章节结构由你根据预测结果自主设计
-
-请输出JSON格式的报告大纲,格式如下:
-{
- "title": "报告标题",
- "summary": "报告摘要(一句话概括核心预测发现)",
- "sections": [
- {
- "title": "章节标题",
- "description": "章节内容描述"
- }
- ]
-}
-
-注意:sections数组最少2个,最多5个元素!"""
-
-PLAN_USER_PROMPT_TEMPLATE = """\
-【预测场景设定】
-我们向模拟世界注入的变量(模拟需求):{simulation_requirement}
-
-【模拟世界规模】
-- 参与模拟的实体数量: {total_nodes}
-- 实体间产生的关系数量: {total_edges}
-- 实体类型分布: {entity_types}
-- 活跃Agent数量: {total_entities}
-
-【模拟预测到的部分未来事实样本】
-{related_facts_json}
-
-请以「上帝视角」审视这个未来预演:
-1. 在我们设定的条件下,未来呈现出了什么样的状态?
-2. 各类人群(Agent)是如何反应和行动的?
-3. 这个模拟揭示了哪些值得关注的未来趋势?
-
-根据预测结果,设计最合适的报告章节结构。
-
-【再次提醒】报告章节数量:最少2个,最多5个,内容要精炼聚焦于核心预测发现。"""
-
-# ── 章节生成 prompt ──
-
-SECTION_SYSTEM_PROMPT_TEMPLATE = """\
-你是一个「未来预测报告」的撰写专家,正在撰写报告的一个章节。
+TOOL_DESC_PANORAMA_SEARCH = get_prompt('report_tool_desc_panorama')
-报告标题: {report_title}
-报告摘要: {report_summary}
-预测场景(模拟需求): {simulation_requirement}
+TOOL_DESC_QUICK_SEARCH = get_prompt('report_tool_desc_quick')
-当前要撰写的章节: {section_title}
+TOOL_DESC_INTERVIEW_AGENTS = get_prompt('report_tool_desc_interview')
-═══════════════════════════════════════════════════════════════
-【核心理念】
-═══════════════════════════════════════════════════════════════
-
-模拟世界是对未来的预演。我们向模拟世界注入了特定条件(模拟需求),
-模拟中Agent的行为和互动,就是对未来人群行为的预测。
-
-你的任务是:
-- 揭示在设定条件下,未来发生了什么
-- 预测各类人群(Agent)是如何反应和行动的
-- 发现值得关注的未来趋势、风险和机会
-
-❌ 不要写成对现实世界现状的分析
-✅ 要聚焦于"未来会怎样"——模拟结果就是预测的未来
-
-═══════════════════════════════════════════════════════════════
-【最重要的规则 - 必须遵守】
-═══════════════════════════════════════════════════════════════
-
-1. 【必须调用工具观察模拟世界】
- - 你正在以「上帝视角」观察未来的预演
- - 所有内容必须来自模拟世界中发生的事件和Agent言行
- - 禁止使用你自己的知识来编写报告内容
- - 每个章节至少调用3次工具(最多5次)来观察模拟的世界,它代表了未来
-
-2. 【必须引用Agent的原始言行】
- - Agent的发言和行为是对未来人群行为的预测
- - 在报告中使用引用格式展示这些预测,例如:
- > "某类人群会表示:原文内容..."
- - 这些引用是模拟预测的核心证据
-
-3. 【语言一致性 - 引用内容必须翻译为报告语言】
- - 工具返回的内容可能包含英文或中英文混杂的表述
- - 如果模拟需求和材料原文是中文的,报告必须全部使用中文撰写
- - 当你引用工具返回的英文或中英混杂内容时,必须将其翻译为流畅的中文后再写入报告
- - 翻译时保持原意不变,确保表述自然通顺
- - 这一规则同时适用于正文和引用块(> 格式)中的内容
-
-4. 【忠实呈现预测结果】
- - 报告内容必须反映模拟世界中的代表未来的模拟结果
- - 不要添加模拟中不存在的信息
- - 如果某方面信息不足,如实说明
-
-═══════════════════════════════════════════════════════════════
-【⚠️ 格式规范 - 极其重要!】
-═══════════════════════════════════════════════════════════════
-
-【一个章节 = 最小内容单位】
-- 每个章节是报告的最小分块单位
-- ❌ 禁止在章节内使用任何 Markdown 标题(#、##、###、#### 等)
-- ❌ 禁止在内容开头添加章节主标题
-- ✅ 章节标题由系统自动添加,你只需撰写纯正文内容
-- ✅ 使用**粗体**、段落分隔、引用、列表来组织内容,但不要用标题
+# ── 大纲规划 prompt ──
-【正确示例】
-```
-本章节分析了事件的舆论传播态势。通过对模拟数据的深入分析,我们发现...
-
-**首发引爆阶段**
+PLAN_SYSTEM_PROMPT = get_prompt('report_plan_system')
-微博作为舆情的第一现场,承担了信息首发的核心功能:
-
-> "微博贡献了68%的首发声量..."
-
-**情绪放大阶段**
-
-抖音平台进一步放大了事件影响力:
-
-- 视觉冲击力强
-- 情绪共鸣度高
-```
-
-【错误示例】
-```
-## 执行摘要 ← 错误!不要添加任何标题
-### 一、首发阶段 ← 错误!不要用###分小节
-#### 1.1 详细分析 ← 错误!不要用####细分
-
-本章节分析了...
-```
-
-═══════════════════════════════════════════════════════════════
-【可用检索工具】(每章节调用3-5次)
-═══════════════════════════════════════════════════════════════
+PLAN_USER_PROMPT_TEMPLATE = get_prompt('report_plan_user')
-{tools_description}
-
-【工具使用建议 - 请混合使用不同工具,不要只用一种】
-- insight_forge: 深度洞察分析,自动分解问题并多维度检索事实和关系
-- panorama_search: 广角全景搜索,了解事件全貌、时间线和演变过程
-- quick_search: 快速验证某个具体信息点
-- interview_agents: 采访模拟Agent,获取不同角色的第一人称观点和真实反应
-
-═══════════════════════════════════════════════════════════════
-【工作流程】
-═══════════════════════════════════════════════════════════════
-
-每次回复你只能做以下两件事之一(不可同时做):
-
-选项A - 调用工具:
-输出你的思考,然后用以下格式调用一个工具:
-
-{{"name": "工具名称", "parameters": {{"参数名": "参数值"}}}}
-
-系统会执行工具并把结果返回给你。你不需要也不能自己编写工具返回结果。
-
-选项B - 输出最终内容:
-当你已通过工具获取了足够信息,以 "Final Answer:" 开头输出章节内容。
-
-⚠️ 严格禁止:
-- 禁止在一次回复中同时包含工具调用和 Final Answer
-- 禁止自己编造工具返回结果(Observation),所有工具结果由系统注入
-- 每次回复最多调用一个工具
-
-═══════════════════════════════════════════════════════════════
-【章节内容要求】
-═══════════════════════════════════════════════════════════════
-
-1. 内容必须基于工具检索到的模拟数据
-2. 大量引用原文来展示模拟效果
-3. 使用Markdown格式(但禁止使用标题):
- - 使用 **粗体文字** 标记重点(代替子标题)
- - 使用列表(-或1.2.3.)组织要点
- - 使用空行分隔不同段落
- - ❌ 禁止使用 #、##、###、#### 等任何标题语法
-4. 【引用格式规范 - 必须单独成段】
- 引用必须独立成段,前后各有一个空行,不能混在段落中:
-
- ✅ 正确格式:
- ```
- 校方的回应被认为缺乏实质内容。
-
- > "校方的应对模式在瞬息万变的社交媒体环境中显得僵化和迟缓。"
-
- 这一评价反映了公众的普遍不满。
- ```
-
- ❌ 错误格式:
- ```
- 校方的回应被认为缺乏实质内容。> "校方的应对模式..." 这一评价反映了...
- ```
-5. 保持与其他章节的逻辑连贯性
-6. 【避免重复】仔细阅读下方已完成的章节内容,不要重复描述相同的信息
-7. 【再次强调】不要添加任何标题!用**粗体**代替小节标题"""
-
-SECTION_USER_PROMPT_TEMPLATE = """\
-已完成的章节内容(请仔细阅读,避免重复):
-{previous_content}
-
-═══════════════════════════════════════════════════════════════
-【当前任务】撰写章节: {section_title}
-═══════════════════════════════════════════════════════════════
+SECTION_SYSTEM_PROMPT_TEMPLATE = get_prompt('report_section_system')
-【重要提醒】
-1. 仔细阅读上方已完成的章节,避免重复相同的内容!
-2. 开始前必须先调用工具获取模拟数据
-3. 请混合使用不同工具,不要只用一种
-4. 报告内容必须来自检索结果,不要使用自己的知识
-
-【⚠️ 格式警告 - 必须遵守】
-- ❌ 不要写任何标题(#、##、###、####都不行)
-- ❌ 不要写"{section_title}"作为开头
-- ✅ 章节标题由系统自动添加
-- ✅ 直接写正文,用**粗体**代替小节标题
-
-请开始:
-1. 首先思考(Thought)这个章节需要什么信息
-2. 然后调用工具(Action)获取模拟数据
-3. 收集足够信息后输出 Final Answer(纯正文,无任何标题)"""
+SECTION_USER_PROMPT_TEMPLATE = get_prompt('report_section_user')
# ── ReACT 循环内消息模板 ──
-REACT_OBSERVATION_TEMPLATE = """\
-Observation(检索结果):
+REACT_OBSERVATION_TEMPLATE = get_prompt('report_react_observation')
-═══ 工具 {tool_name} 返回 ═══
-{result}
+REACT_INSUFFICIENT_TOOLS_MSG = get_prompt('report_react_insufficient')
-═══════════════════════════════════════════════════════════════
-已调用工具 {tool_calls_count}/{max_tool_calls} 次(已用: {used_tools_str}){unused_hint}
-- 如果信息充分:以 "Final Answer:" 开头输出章节内容(必须引用上述原文)
-- 如果需要更多信息:调用一个工具继续检索
-═══════════════════════════════════════════════════════════════"""
+REACT_INSUFFICIENT_TOOLS_MSG_ALT = get_prompt('report_react_insufficient_alt')
-REACT_INSUFFICIENT_TOOLS_MSG = (
- "【注意】你只调用了{tool_calls_count}次工具,至少需要{min_tool_calls}次。"
- "请再调用工具获取更多模拟数据,然后再输出 Final Answer。{unused_hint}"
-)
+REACT_TOOL_LIMIT_MSG = get_prompt('report_react_tool_limit')
-REACT_INSUFFICIENT_TOOLS_MSG_ALT = (
- "当前只调用了 {tool_calls_count} 次工具,至少需要 {min_tool_calls} 次。"
- "请调用工具获取模拟数据。{unused_hint}"
-)
-
-REACT_TOOL_LIMIT_MSG = (
- "工具调用次数已达上限({tool_calls_count}/{max_tool_calls}),不能再调用工具。"
- '请立即基于已获取的信息,以 "Final Answer:" 开头输出章节内容。'
-)
+REACT_UNUSED_TOOLS_HINT = get_prompt('report_react_unused_hint')
-REACT_UNUSED_TOOLS_HINT = "\n💡 你还没有使用过: {unused_list},建议尝试不同工具获取多角度信息"
-
-REACT_FORCE_FINAL_MSG = "已达到工具调用限制,请直接输出 Final Answer: 并生成章节内容。"
+REACT_FORCE_FINAL_MSG = get_prompt('report_react_force_final')
# ── Chat prompt ──
-CHAT_SYSTEM_PROMPT_TEMPLATE = """\
-你是一个简洁高效的模拟预测助手。
-
-【背景】
-预测条件: {simulation_requirement}
-
-【已生成的分析报告】
-{report_content}
+CHAT_SYSTEM_PROMPT_TEMPLATE = get_prompt('report_chat_system')
-【规则】
-1. 优先基于上述报告内容回答问题
-2. 直接回答问题,避免冗长的思考论述
-3. 仅在报告内容不足以回答时,才调用工具检索更多数据
-4. 回答要简洁、清晰、有条理
-
-【可用工具】(仅在需要时使用,最多调用1-2次)
-{tools_description}
-
-【工具调用格式】
-
-{{"name": "工具名称", "parameters": {{"参数名": "参数值"}}}}
-
-
-【回答风格】
-- 简洁直接,不要长篇大论
-- 使用 > 格式引用关键内容
-- 优先给出结论,再解释原因"""
-
-CHAT_OBSERVATION_SUFFIX = "\n\n请简洁回答问题。"
+CHAT_OBSERVATION_SUFFIX = get_string('report_chat_observation_suffix')
# ═══════════════════════════════════════════════════════════════
@@ -922,32 +578,32 @@ def _define_tools(self) -> Dict[str, Dict[str, Any]]:
"name": "insight_forge",
"description": TOOL_DESC_INSIGHT_FORGE,
"parameters": {
- "query": "你想深入分析的问题或话题",
- "report_context": "当前报告章节的上下文(可选,有助于生成更精准的子问题)"
+ "query": get_string('param_insight_query'),
+ "report_context": get_string('param_insight_context')
}
},
"panorama_search": {
"name": "panorama_search",
"description": TOOL_DESC_PANORAMA_SEARCH,
"parameters": {
- "query": "搜索查询,用于相关性排序",
- "include_expired": "是否包含过期/历史内容(默认True)"
+ "query": get_string('param_panorama_query'),
+ "include_expired": get_string('param_panorama_expired')
}
},
"quick_search": {
"name": "quick_search",
"description": TOOL_DESC_QUICK_SEARCH,
"parameters": {
- "query": "搜索查询字符串",
- "limit": "返回结果数量(可选,默认10)"
+ "query": get_string('param_quick_query'),
+ "limit": get_string('param_quick_limit')
}
},
"interview_agents": {
"name": "interview_agents",
"description": TOOL_DESC_INTERVIEW_AGENTS,
"parameters": {
- "interview_topic": "采访主题或需求描述(如:'了解学生对宿舍甲醛事件的看法')",
- "max_agents": "最多采访的Agent数量(可选,默认5,最大10)"
+ "interview_topic": get_string('param_interview_topic'),
+ "max_agents": get_string('param_interview_max')
}
}
}
@@ -1054,11 +710,11 @@ def _execute_tool(self, tool_name: str, parameters: Dict[str, Any], report_conte
return json.dumps(result, ensure_ascii=False, indent=2)
else:
- return f"未知工具: {tool_name}。请使用以下工具之一: insight_forge, panorama_search, quick_search"
-
+ return get_string('report_unknown_tool', tool_name=tool_name, available="insight_forge, panorama_search, quick_search")
+
except Exception as e:
logger.error(f"工具执行失败: {tool_name}, 错误: {str(e)}")
- return f"工具执行失败: {str(e)}"
+ return get_string('report_tool_failed', error=str(e))
# 合法的工具名称集合,用于裸 JSON 兜底解析时校验
VALID_TOOL_NAMES = {"insight_forge", "panorama_search", "quick_search", "interview_agents"}
@@ -1125,12 +781,12 @@ def _is_valid_tool_call(self, data: dict) -> bool:
def _get_tools_description(self) -> str:
"""生成工具描述文本"""
- desc_parts = ["可用工具:"]
+ desc_parts = [get_string('tools_available_prefix')]
for name, tool in self.tools.items():
params_desc = ", ".join([f"{k}: {v}" for k, v in tool["parameters"].items()])
desc_parts.append(f"- {name}: {tool['description']}")
if params_desc:
- desc_parts.append(f" 参数: {params_desc}")
+ desc_parts.append(get_string('tools_params_prefix') + params_desc)
return "\n".join(desc_parts)
def plan_outline(
@@ -1151,16 +807,16 @@ def plan_outline(
logger.info("开始规划报告大纲...")
if progress_callback:
- progress_callback("planning", 0, "正在分析模拟需求...")
-
+ progress_callback("planning", 0, get_string('progress_analyzing'))
+
# 首先获取模拟上下文
context = self.zep_tools.get_simulation_context(
graph_id=self.graph_id,
simulation_requirement=self.simulation_requirement
)
-
+
if progress_callback:
- progress_callback("planning", 30, "正在生成报告大纲...")
+ progress_callback("planning", 30, get_string('progress_generating_outline'))
system_prompt = PLAN_SYSTEM_PROMPT
user_prompt = PLAN_USER_PROMPT_TEMPLATE.format(
@@ -1182,7 +838,7 @@ def plan_outline(
)
if progress_callback:
- progress_callback("planning", 80, "正在解析大纲结构...")
+ progress_callback("planning", 80, get_string('progress_parsing_outline'))
# 解析大纲
sections = []
@@ -1199,7 +855,7 @@ def plan_outline(
)
if progress_callback:
- progress_callback("planning", 100, "大纲规划完成")
+ progress_callback("planning", 100, get_string('progress_outline_done'))
logger.info(f"大纲规划完成: {len(sections)} 个章节")
return outline
@@ -1208,12 +864,12 @@ def plan_outline(
logger.error(f"大纲规划失败: {str(e)}")
# 返回默认大纲(3个章节,作为fallback)
return ReportOutline(
- title="未来预测报告",
- summary="基于模拟预测的未来趋势与风险分析",
+ title=get_string('report_default_title'),
+ summary=get_string('report_default_summary'),
sections=[
- ReportSection(title="预测场景与核心发现"),
- ReportSection(title="人群行为预测分析"),
- ReportSection(title="趋势展望与风险提示")
+ ReportSection(title=get_string('report_default_section1')),
+ ReportSection(title=get_string('report_default_section2')),
+ ReportSection(title=get_string('report_default_section3'))
]
)
@@ -1268,7 +924,7 @@ def _generate_section_react(
previous_parts.append(truncated)
previous_content = "\n\n---\n\n".join(previous_parts)
else:
- previous_content = "(这是第一个章节)"
+ previous_content = get_string('report_first_section')
user_prompt = SECTION_USER_PROMPT_TEMPLATE.format(
previous_content=previous_content,
@@ -1296,7 +952,7 @@ def _generate_section_react(
progress_callback(
"generating",
int((iteration / max_iterations) * 100),
- f"深度检索与撰写中 ({tool_calls_count}/{self.MAX_TOOL_CALLS_PER_SECTION})"
+ get_string('progress_deep_search', count=tool_calls_count, max=self.MAX_TOOL_CALLS_PER_SECTION)
)
# 调用LLM
@@ -1311,8 +967,8 @@ def _generate_section_react(
logger.warning(f"章节 {section.title} 第 {iteration + 1} 次迭代: LLM 返回 None")
# 如果还有迭代次数,添加消息并重试
if iteration < max_iterations - 1:
- messages.append({"role": "assistant", "content": "(响应为空)"})
- messages.append({"role": "user", "content": "请继续生成内容。"})
+ messages.append({"role": "assistant", "content": get_string('report_empty_response')})
+ messages.append({"role": "user", "content": get_string('report_continue')})
continue
# 最后一次迭代也返回 None,跳出循环进入强制收尾
break
@@ -1337,13 +993,7 @@ def _generate_section_react(
messages.append({"role": "assistant", "content": response})
messages.append({
"role": "user",
- "content": (
- "【格式错误】你在一次回复中同时包含了工具调用和 Final Answer,这是不允许的。\n"
- "每次回复只能做以下两件事之一:\n"
- "- 调用一个工具(输出一个 块,不要写 Final Answer)\n"
- "- 输出最终内容(以 'Final Answer:' 开头,不要包含 )\n"
- "请重新回复,只做其中一件事。"
- ),
+ "content": get_string('report_conflict_format_error'),
})
continue
else:
@@ -1512,7 +1162,7 @@ def _generate_section_react(
# 检查强制收尾时 LLM 返回是否为 None
if response is None:
logger.error(f"章节 {section.title} 强制收尾时 LLM 返回 None,使用默认错误提示")
- final_answer = f"(本章节生成失败:LLM 返回空响应,请稍后重试)"
+ final_answer = get_string('report_section_gen_failed')
elif "Final Answer:" in response:
final_answer = response.split("Final Answer:")[-1].strip()
else:
@@ -1590,7 +1240,7 @@ def generate_report(
self.console_logger = ReportConsoleLogger(report_id)
ReportManager.update_progress(
- report_id, "pending", 0, "初始化报告...",
+ report_id, "pending", 0, get_string('progress_init_report'),
completed_sections=[]
)
ReportManager.save_report(report)
@@ -1598,7 +1248,7 @@ def generate_report(
# 阶段1: 规划大纲
report.status = ReportStatus.PLANNING
ReportManager.update_progress(
- report_id, "planning", 5, "开始规划报告大纲...",
+ report_id, "planning", 5, get_string('progress_start_outline'),
completed_sections=[]
)
@@ -1606,7 +1256,7 @@ def generate_report(
self.report_logger.log_planning_start()
if progress_callback:
- progress_callback("planning", 0, "开始规划报告大纲...")
+ progress_callback("planning", 0, get_string('progress_start_outline'))
outline = self.plan_outline(
progress_callback=lambda stage, prog, msg:
@@ -1620,7 +1270,7 @@ def generate_report(
# 保存大纲到文件
ReportManager.save_outline(report_id, outline)
ReportManager.update_progress(
- report_id, "planning", 15, f"大纲规划完成,共{len(outline.sections)}个章节",
+ report_id, "planning", 15, get_string('progress_outline_sections', count=len(outline.sections)),
completed_sections=[]
)
ReportManager.save_report(report)
@@ -1638,18 +1288,19 @@ def generate_report(
base_progress = 20 + int((i / total_sections) * 70)
# 更新进度
+ section_progress_msg = get_string('progress_generating_section', title=section.title, num=section_num, total=total_sections)
ReportManager.update_progress(
report_id, "generating", base_progress,
- f"正在生成章节: {section.title} ({section_num}/{total_sections})",
+ section_progress_msg,
current_section=section.title,
completed_sections=completed_section_titles
)
-
+
if progress_callback:
progress_callback(
- "generating",
- base_progress,
- f"正在生成章节: {section.title} ({section_num}/{total_sections})"
+ "generating",
+ base_progress,
+ section_progress_msg
)
# 生成主章节内容
@@ -1687,19 +1338,19 @@ def generate_report(
# 更新进度
ReportManager.update_progress(
- report_id, "generating",
+ report_id, "generating",
base_progress + int(70 / total_sections),
- f"章节 {section.title} 已完成",
+ get_string('progress_section_done', title=section.title),
current_section=None,
completed_sections=completed_section_titles
)
# 阶段3: 组装完整报告
if progress_callback:
- progress_callback("generating", 95, "正在组装完整报告...")
-
+ progress_callback("generating", 95, get_string('progress_assembling'))
+
ReportManager.update_progress(
- report_id, "generating", 95, "正在组装完整报告...",
+ report_id, "generating", 95, get_string('progress_assembling'),
completed_sections=completed_section_titles
)
@@ -1721,12 +1372,12 @@ def generate_report(
# 保存最终报告
ReportManager.save_report(report)
ReportManager.update_progress(
- report_id, "completed", 100, "报告生成完成",
+ report_id, "completed", 100, get_string('progress_report_done'),
completed_sections=completed_section_titles
)
-
+
if progress_callback:
- progress_callback("completed", 100, "报告生成完成")
+ progress_callback("completed", 100, get_string('progress_report_done'))
logger.info(f"报告生成完成: {report_id}")
@@ -1750,7 +1401,7 @@ def generate_report(
try:
ReportManager.save_report(report)
ReportManager.update_progress(
- report_id, "failed", -1, f"报告生成失败: {str(e)}",
+ report_id, "failed", -1, get_string('progress_report_failed', error=str(e)),
completed_sections=completed_section_titles
)
except Exception:
@@ -1796,13 +1447,13 @@ def chat(
# 限制报告长度,避免上下文过长
report_content = report.markdown_content[:15000]
if len(report.markdown_content) > 15000:
- report_content += "\n\n... [报告内容已截断] ..."
+ report_content += get_string('report_truncated')
except Exception as e:
logger.warning(f"获取报告内容失败: {e}")
system_prompt = CHAT_SYSTEM_PROMPT_TEMPLATE.format(
simulation_requirement=self.simulation_requirement,
- report_content=report_content if report_content else "(暂无报告)",
+ report_content=report_content if report_content else get_string('report_no_report'),
tools_description=self._get_tools_description(),
)
diff --git a/backend/app/services/simulation_config_generator.py b/backend/app/services/simulation_config_generator.py
index cc362508b..5b1b2355a 100644
--- a/backend/app/services/simulation_config_generator.py
+++ b/backend/app/services/simulation_config_generator.py
@@ -20,6 +20,7 @@
from ..config import Config
from ..utils.logger import get_logger
+from ..i18n import get_prompt, get_format, get_string
from .zep_entity_reader import EntityNode, ZepEntityReader
logger = get_logger('mirofish.simulation_config')
@@ -387,21 +388,21 @@ def _build_context(
# 实体摘要
entity_summary = self._summarize_entities(entities)
-
+
# 构建上下文
context_parts = [
- f"## 模拟需求\n{simulation_requirement}",
- f"\n## 实体信息 ({len(entities)}个)\n{entity_summary}",
+ get_string('sim_config_context_req') + "\n" + simulation_requirement,
+ "\n" + get_string('sim_config_context_entities', count=len(entities)) + "\n" + entity_summary,
]
-
+
current_length = sum(len(p) for p in context_parts)
remaining_length = self.MAX_CONTEXT_LENGTH - current_length - 500 # 留500字符余量
-
+
if remaining_length > 0 and document_text:
doc_text = document_text[:remaining_length]
if len(document_text) > remaining_length:
- doc_text += "\n...(文档已截断)"
- context_parts.append(f"\n## 原始文档内容\n{doc_text}")
+ doc_text += get_string('sim_config_doc_truncated')
+ context_parts.append("\n" + get_string('sim_config_context_docs') + "\n" + doc_text)
return "\n".join(context_parts)
@@ -539,52 +540,12 @@ def _generate_time_config(self, context: str, num_entities: int) -> Dict[str, An
# 计算最大允许值(80%的agent数)
max_agents_allowed = max(1, int(num_entities * 0.9))
- prompt = f"""基于以下模拟需求,生成时间模拟配置。
-
-{context_truncated}
-
-## 任务
-请生成时间配置JSON。
-
-### 基本原则(仅供参考,需根据具体事件和参与群体灵活调整):
-- 用户群体为中国人,需符合北京时间作息习惯
-- 凌晨0-5点几乎无人活动(活跃度系数0.05)
-- 早上6-8点逐渐活跃(活跃度系数0.4)
-- 工作时间9-18点中等活跃(活跃度系数0.7)
-- 晚间19-22点是高峰期(活跃度系数1.5)
-- 23点后活跃度下降(活跃度系数0.5)
-- 一般规律:凌晨低活跃、早间渐增、工作时段中等、晚间高峰
-- **重要**:以下示例值仅供参考,你需要根据事件性质、参与群体特点来调整具体时段
- - 例如:学生群体高峰可能是21-23点;媒体全天活跃;官方机构只在工作时间
- - 例如:突发热点可能导致深夜也有讨论,off_peak_hours 可适当缩短
-
-### 返回JSON格式(不要markdown)
-
-示例:
-{{
- "total_simulation_hours": 72,
- "minutes_per_round": 60,
- "agents_per_hour_min": 5,
- "agents_per_hour_max": 50,
- "peak_hours": [19, 20, 21, 22],
- "off_peak_hours": [0, 1, 2, 3, 4, 5],
- "morning_hours": [6, 7, 8],
- "work_hours": [9, 10, 11, 12, 13, 14, 15, 16, 17, 18],
- "reasoning": "针对该事件的时间配置说明"
-}}
-
-字段说明:
-- total_simulation_hours (int): 模拟总时长,24-168小时,突发事件短、持续话题长
-- minutes_per_round (int): 每轮时长,30-120分钟,建议60分钟
-- agents_per_hour_min (int): 每小时最少激活Agent数(取值范围: 1-{max_agents_allowed})
-- agents_per_hour_max (int): 每小时最多激活Agent数(取值范围: 1-{max_agents_allowed})
-- peak_hours (int数组): 高峰时段,根据事件参与群体调整
-- off_peak_hours (int数组): 低谷时段,通常深夜凌晨
-- morning_hours (int数组): 早间时段
-- work_hours (int数组): 工作时段
-- reasoning (string): 简要说明为什么这样配置"""
+ prompt = get_prompt('sim_config_time').format(
+ context=context_truncated,
+ max_agents_allowed=max_agents_allowed
+ )
- system_prompt = "你是社交媒体模拟专家。返回纯JSON格式,时间配置需符合中国人作息习惯。"
+ system_prompt = get_prompt('sim_config_time_system')
try:
return self._call_llm_with_retry(prompt, system_prompt)
@@ -603,7 +564,7 @@ def _get_default_time_config(self, num_entities: int) -> Dict[str, Any]:
"off_peak_hours": [0, 1, 2, 3, 4, 5],
"morning_hours": [6, 7, 8],
"work_hours": [9, 10, 11, 12, 13, 14, 15, 16, 17, 18],
- "reasoning": "使用默认中国人作息配置(每轮1小时)"
+ "reasoning": get_string('sim_config_default_time_reasoning')
}
def _parse_time_config(self, result: Dict[str, Any], num_entities: int) -> TimeSimulationConfig:
@@ -671,36 +632,13 @@ def _generate_event_config(
# 使用配置的上下文截断长度
context_truncated = context[:self.EVENT_CONFIG_CONTEXT_LENGTH]
- prompt = f"""基于以下模拟需求,生成事件配置。
-
-模拟需求: {simulation_requirement}
-
-{context_truncated}
-
-## 可用实体类型及示例
-{type_info}
-
-## 任务
-请生成事件配置JSON:
-- 提取热点话题关键词
-- 描述舆论发展方向
-- 设计初始帖子内容,**每个帖子必须指定 poster_type(发布者类型)**
-
-**重要**: poster_type 必须从上面的"可用实体类型"中选择,这样初始帖子才能分配给合适的 Agent 发布。
-例如:官方声明应由 Official/University 类型发布,新闻由 MediaOutlet 发布,学生观点由 Student 发布。
-
-返回JSON格式(不要markdown):
-{{
- "hot_topics": ["关键词1", "关键词2", ...],
- "narrative_direction": "<舆论发展方向描述>",
- "initial_posts": [
- {{"content": "帖子内容", "poster_type": "实体类型(必须从可用类型中选择)"}},
- ...
- ],
- "reasoning": "<简要说明>"
-}}"""
+ prompt = get_prompt('sim_config_event').format(
+ simulation_requirement=simulation_requirement,
+ context=context_truncated,
+ type_info=type_info
+ )
- system_prompt = "你是舆论分析专家。返回纯JSON格式。注意 poster_type 必须精确匹配可用实体类型。"
+ system_prompt = get_prompt('sim_config_event_system')
try:
return self._call_llm_with_retry(prompt, system_prompt)
@@ -710,7 +648,7 @@ def _generate_event_config(
"hot_topics": [],
"narrative_direction": "",
"initial_posts": [],
- "reasoning": "使用默认配置"
+ "reasoning": get_string('sim_config_default_event_reasoning')
}
def _parse_event_config(self, result: Dict[str, Any]) -> EventConfig:
@@ -863,7 +801,7 @@ def _generate_agent_configs_batch(
]
}}"""
- system_prompt = "你是社交媒体行为分析专家。返回纯JSON,配置需符合中国人作息习惯。"
+ system_prompt = get_prompt('sim_config_agent_system')
try:
result = self._call_llm_with_retry(prompt, system_prompt)
diff --git a/backend/app/services/zep_graph_memory_updater.py b/backend/app/services/zep_graph_memory_updater.py
index a8f3cecd9..5283703ba 100644
--- a/backend/app/services/zep_graph_memory_updater.py
+++ b/backend/app/services/zep_graph_memory_updater.py
@@ -16,6 +16,7 @@
from ..config import Config
from ..utils.logger import get_logger
+from ..i18n import get_prompt, get_format, get_string
logger = get_logger('mirofish.zep_graph_memory_updater')
@@ -63,139 +64,139 @@ def to_episode_text(self) -> str:
def _describe_create_post(self) -> str:
content = self.action_args.get("content", "")
if content:
- return f"发布了一条帖子:「{content}」"
- return "发布了一条帖子"
-
+ return get_format('action_create_post_with_content').format(content=content)
+ return get_format('action_create_post')
+
def _describe_like_post(self) -> str:
"""点赞帖子 - 包含帖子原文和作者信息"""
post_content = self.action_args.get("post_content", "")
post_author = self.action_args.get("post_author_name", "")
-
+
if post_content and post_author:
- return f"点赞了{post_author}的帖子:「{post_content}」"
+ return get_format('action_like_post_with_both').format(author=post_author, content=post_content)
elif post_content:
- return f"点赞了一条帖子:「{post_content}」"
+ return get_format('action_like_post_with_content').format(content=post_content)
elif post_author:
- return f"点赞了{post_author}的一条帖子"
- return "点赞了一条帖子"
-
+ return get_format('action_like_post_with_author').format(author=post_author)
+ return get_format('action_like_post')
+
def _describe_dislike_post(self) -> str:
"""踩帖子 - 包含帖子原文和作者信息"""
post_content = self.action_args.get("post_content", "")
post_author = self.action_args.get("post_author_name", "")
-
+
if post_content and post_author:
- return f"踩了{post_author}的帖子:「{post_content}」"
+ return get_format('action_dislike_post_with_both').format(author=post_author, content=post_content)
elif post_content:
- return f"踩了一条帖子:「{post_content}」"
+ return get_format('action_dislike_post_with_content').format(content=post_content)
elif post_author:
- return f"踩了{post_author}的一条帖子"
- return "踩了一条帖子"
-
+ return get_format('action_dislike_post_with_author').format(author=post_author)
+ return get_format('action_dislike_post')
+
def _describe_repost(self) -> str:
"""转发帖子 - 包含原帖内容和作者信息"""
original_content = self.action_args.get("original_content", "")
original_author = self.action_args.get("original_author_name", "")
-
+
if original_content and original_author:
- return f"转发了{original_author}的帖子:「{original_content}」"
+ return get_format('action_repost_with_both').format(author=original_author, content=original_content)
elif original_content:
- return f"转发了一条帖子:「{original_content}」"
+ return get_format('action_repost_with_content').format(content=original_content)
elif original_author:
- return f"转发了{original_author}的一条帖子"
- return "转发了一条帖子"
-
+ return get_format('action_repost_with_author').format(author=original_author)
+ return get_format('action_repost')
+
def _describe_quote_post(self) -> str:
"""引用帖子 - 包含原帖内容、作者信息和引用评论"""
original_content = self.action_args.get("original_content", "")
original_author = self.action_args.get("original_author_name", "")
quote_content = self.action_args.get("quote_content", "") or self.action_args.get("content", "")
-
+
base = ""
if original_content and original_author:
- base = f"引用了{original_author}的帖子「{original_content}」"
+ base = get_format('action_quote_with_both').format(author=original_author, content=original_content)
elif original_content:
- base = f"引用了一条帖子「{original_content}」"
+ base = get_format('action_quote_with_content').format(content=original_content)
elif original_author:
- base = f"引用了{original_author}的一条帖子"
+ base = get_format('action_quote_with_author').format(author=original_author)
else:
- base = "引用了一条帖子"
-
+ base = get_format('action_quote')
+
if quote_content:
- base += f",并评论道:「{quote_content}」"
+ base += get_format('action_quote_comment').format(content=quote_content)
return base
-
+
def _describe_follow(self) -> str:
"""关注用户 - 包含被关注用户的名称"""
target_user_name = self.action_args.get("target_user_name", "")
-
+
if target_user_name:
- return f"关注了用户「{target_user_name}」"
- return "关注了一个用户"
-
+ return get_format('action_follow_with_name').format(name=target_user_name)
+ return get_format('action_follow')
+
def _describe_create_comment(self) -> str:
"""发表评论 - 包含评论内容和所评论的帖子信息"""
content = self.action_args.get("content", "")
post_content = self.action_args.get("post_content", "")
post_author = self.action_args.get("post_author_name", "")
-
+
if content:
if post_content and post_author:
- return f"在{post_author}的帖子「{post_content}」下评论道:「{content}」"
+ return get_format('action_comment_full').format(author=post_author, post_content=post_content, content=content)
elif post_content:
- return f"在帖子「{post_content}」下评论道:「{content}」"
+ return get_format('action_comment_with_content_only').format(post_content=post_content, content=content)
elif post_author:
- return f"在{post_author}的帖子下评论道:「{content}」"
- return f"评论道:「{content}」"
- return "发表了评论"
-
+ return get_format('action_comment_with_author').format(author=post_author, content=content)
+ return get_format('action_comment_content').format(content=content)
+ return get_format('action_comment')
+
def _describe_like_comment(self) -> str:
"""点赞评论 - 包含评论内容和作者信息"""
comment_content = self.action_args.get("comment_content", "")
comment_author = self.action_args.get("comment_author_name", "")
-
+
if comment_content and comment_author:
- return f"点赞了{comment_author}的评论:「{comment_content}」"
+ return get_format('action_like_comment_with_both').format(author=comment_author, content=comment_content)
elif comment_content:
- return f"点赞了一条评论:「{comment_content}」"
+ return get_format('action_like_comment_with_content').format(content=comment_content)
elif comment_author:
- return f"点赞了{comment_author}的一条评论"
- return "点赞了一条评论"
-
+ return get_format('action_like_comment_with_author').format(author=comment_author)
+ return get_format('action_like_comment')
+
def _describe_dislike_comment(self) -> str:
"""踩评论 - 包含评论内容和作者信息"""
comment_content = self.action_args.get("comment_content", "")
comment_author = self.action_args.get("comment_author_name", "")
-
+
if comment_content and comment_author:
- return f"踩了{comment_author}的评论:「{comment_content}」"
+ return get_format('action_dislike_comment_with_both').format(author=comment_author, content=comment_content)
elif comment_content:
- return f"踩了一条评论:「{comment_content}」"
+ return get_format('action_dislike_comment_with_content').format(content=comment_content)
elif comment_author:
- return f"踩了{comment_author}的一条评论"
- return "踩了一条评论"
-
+ return get_format('action_dislike_comment_with_author').format(author=comment_author)
+ return get_format('action_dislike_comment')
+
def _describe_search(self) -> str:
"""搜索帖子 - 包含搜索关键词"""
query = self.action_args.get("query", "") or self.action_args.get("keyword", "")
- return f"搜索了「{query}」" if query else "进行了搜索"
-
+ return get_format('action_search_with_query').format(query=query) if query else get_format('action_search')
+
def _describe_search_user(self) -> str:
"""搜索用户 - 包含搜索关键词"""
query = self.action_args.get("query", "") or self.action_args.get("username", "")
- return f"搜索了用户「{query}」" if query else "搜索了用户"
-
+ return get_format('action_search_user_with_query').format(query=query) if query else get_format('action_search_user')
+
def _describe_mute(self) -> str:
"""屏蔽用户 - 包含被屏蔽用户的名称"""
target_user_name = self.action_args.get("target_user_name", "")
-
+
if target_user_name:
- return f"屏蔽了用户「{target_user_name}」"
- return "屏蔽了一个用户"
-
+ return get_format('action_mute_with_name').format(name=target_user_name)
+ return get_format('action_mute')
+
def _describe_generic(self) -> str:
# 对于未知的动作类型,生成通用描述
- return f"执行了{self.action_type}操作"
+ return get_format('action_generic').format(action_type=self.action_type)
class ZepGraphMemoryUpdater:
@@ -215,10 +216,10 @@ class ZepGraphMemoryUpdater:
# 批量发送大小(每个平台累积多少条后发送)
BATCH_SIZE = 5
- # 平台名称映射(用于控制台显示)
+ # 平台名称映射(用于控制台显示,从 i18n 加载)
PLATFORM_DISPLAY_NAMES = {
- 'twitter': '世界1',
- 'reddit': '世界2',
+ 'twitter': get_format('platform_twitter'),
+ 'reddit': get_format('platform_reddit'),
}
# 发送间隔(秒),避免请求过快
diff --git a/backend/app/services/zep_tools.py b/backend/app/services/zep_tools.py
index 384cf540f..4d8275d79 100644
--- a/backend/app/services/zep_tools.py
+++ b/backend/app/services/zep_tools.py
@@ -19,6 +19,7 @@
from ..utils.logger import get_logger
from ..utils.llm_client import LLMClient
from ..utils.zep_paging import fetch_all_nodes, fetch_all_edges
+from ..i18n import get_prompt, get_format, get_string
logger = get_logger('mirofish.zep_tools')
@@ -43,13 +44,13 @@ def to_dict(self) -> Dict[str, Any]:
def to_text(self) -> str:
"""转换为文本格式,供LLM理解"""
- text_parts = [f"搜索查询: {self.query}", f"找到 {self.total_count} 条相关信息"]
-
+ text_parts = [get_format('search_query').format(query=self.query), get_format('search_results_found').format(count=self.total_count)]
+
if self.facts:
- text_parts.append("\n### 相关事实:")
+ text_parts.append(get_format('search_facts_header'))
for i, fact in enumerate(self.facts, 1):
text_parts.append(f"{i}. {fact}")
-
+
return "\n".join(text_parts)
@@ -73,8 +74,8 @@ def to_dict(self) -> Dict[str, Any]:
def to_text(self) -> str:
"""转换为文本格式"""
- entity_type = next((l for l in self.labels if l not in ["Entity", "Node"]), "未知类型")
- return f"实体: {self.name} (类型: {entity_type})\n摘要: {self.summary}"
+ entity_type = next((l for l in self.labels if l not in ["Entity", "Node"]), get_format('entity_unknown_type'))
+ return get_format('entity_format').format(name=self.name, type=entity_type) + "\n" + get_format('entity_summary').format(summary=self.summary)
@dataclass
@@ -112,15 +113,15 @@ def to_text(self, include_temporal: bool = False) -> str:
"""转换为文本格式"""
source = self.source_node_name or self.source_node_uuid[:8]
target = self.target_node_name or self.target_node_uuid[:8]
- base_text = f"关系: {source} --[{self.name}]--> {target}\n事实: {self.fact}"
-
+ base_text = get_format('edge_format').format(source=source, name=self.name, target=target) + "\n" + get_format('edge_fact').format(fact=self.fact)
+
if include_temporal:
valid_at = self.valid_at or "未知"
invalid_at = self.invalid_at or "至今"
- base_text += f"\n时效: {valid_at} - {invalid_at}"
+ base_text += get_format('edge_validity').format(valid_at=valid_at, invalid_at=invalid_at)
if self.expired_at:
- base_text += f" (已过期: {self.expired_at})"
-
+ base_text += " (" + get_format('edge_expired').format(expired_at=self.expired_at) + ")"
+
return base_text
@property
@@ -170,43 +171,43 @@ def to_dict(self) -> Dict[str, Any]:
def to_text(self) -> str:
"""转换为详细的文本格式,供LLM理解"""
text_parts = [
- f"## 未来预测深度分析",
- f"分析问题: {self.query}",
- f"预测场景: {self.simulation_requirement}",
- f"\n### 预测数据统计",
- f"- 相关预测事实: {self.total_facts}条",
- f"- 涉及实体: {self.total_entities}个",
- f"- 关系链: {self.total_relationships}条"
+ get_format('insight_header'),
+ get_format('insight_query').format(query=self.query),
+ get_format('insight_scenario').format(requirement=self.simulation_requirement),
+ get_format('insight_stats_header'),
+ "- " + get_format('insight_facts_count').format(count=self.total_facts),
+ "- " + get_format('insight_entities_count').format(count=self.total_entities),
+ "- " + get_format('insight_relations_count').format(count=self.total_relationships)
]
-
+
# 子问题
if self.sub_queries:
- text_parts.append(f"\n### 分析的子问题")
+ text_parts.append(get_format('insight_subqueries_header'))
for i, sq in enumerate(self.sub_queries, 1):
text_parts.append(f"{i}. {sq}")
-
+
# 语义搜索结果
if self.semantic_facts:
- text_parts.append(f"\n### 【关键事实】(请在报告中引用这些原文)")
+ text_parts.append(get_format('insight_key_facts_header'))
for i, fact in enumerate(self.semantic_facts, 1):
text_parts.append(f"{i}. \"{fact}\"")
-
+
# 实体洞察
if self.entity_insights:
- text_parts.append(f"\n### 【核心实体】")
+ text_parts.append(get_format('insight_entities_header'))
for entity in self.entity_insights:
- text_parts.append(f"- **{entity.get('name', '未知')}** ({entity.get('type', '实体')})")
+ text_parts.append(get_format('insight_entity_format').format(name=entity.get('name', '未知'), type=entity.get('type', '实体')))
if entity.get('summary'):
- text_parts.append(f" 摘要: \"{entity.get('summary')}\"")
+ text_parts.append(get_format('insight_entity_summary').format(summary=entity.get('summary')))
if entity.get('related_facts'):
- text_parts.append(f" 相关事实: {len(entity.get('related_facts', []))}条")
+ text_parts.append(get_format('insight_entity_facts').format(count=len(entity.get('related_facts', []))))
# 关系链
if self.relationship_chains:
- text_parts.append(f"\n### 【关系链】")
+ text_parts.append(get_format('insight_relations_header'))
for chain in self.relationship_chains:
text_parts.append(f"- {chain}")
-
+
return "\n".join(text_parts)
@@ -249,34 +250,34 @@ def to_dict(self) -> Dict[str, Any]:
def to_text(self) -> str:
"""转换为文本格式(完整版本,不截断)"""
text_parts = [
- f"## 广度搜索结果(未来全景视图)",
- f"查询: {self.query}",
- f"\n### 统计信息",
- f"- 总节点数: {self.total_nodes}",
- f"- 总边数: {self.total_edges}",
- f"- 当前有效事实: {self.active_count}条",
- f"- 历史/过期事实: {self.historical_count}条"
+ get_format('panorama_header'),
+ get_format('panorama_query').format(query=self.query),
+ get_format('panorama_stats_header'),
+ get_format('panorama_nodes').format(count=self.total_nodes),
+ get_format('panorama_edges').format(count=self.total_edges),
+ get_format('panorama_active').format(count=self.active_count),
+ get_format('panorama_historical').format(count=self.historical_count)
]
-
+
# 当前有效的事实(完整输出,不截断)
if self.active_facts:
- text_parts.append(f"\n### 【当前有效事实】(模拟结果原文)")
+ text_parts.append(get_format('panorama_active_header'))
for i, fact in enumerate(self.active_facts, 1):
text_parts.append(f"{i}. \"{fact}\"")
-
+
# 历史/过期事实(完整输出,不截断)
if self.historical_facts:
- text_parts.append(f"\n### 【历史/过期事实】(演变过程记录)")
+ text_parts.append(get_format('panorama_historical_header'))
for i, fact in enumerate(self.historical_facts, 1):
text_parts.append(f"{i}. \"{fact}\"")
-
+
# 关键实体(完整输出,不截断)
if self.all_nodes:
- text_parts.append(f"\n### 【涉及实体】")
+ text_parts.append(get_format('panorama_entities_header'))
for node in self.all_nodes:
entity_type = next((l for l in node.labels if l not in ["Entity", "Node"]), "实体")
- text_parts.append(f"- **{node.name}** ({entity_type})")
-
+ text_parts.append(get_format('insight_entity_format').format(name=node.name, type=entity_type))
+
return "\n".join(text_parts)
@@ -303,11 +304,11 @@ def to_dict(self) -> Dict[str, Any]:
def to_text(self) -> str:
text = f"**{self.agent_name}** ({self.agent_role})\n"
# 显示完整的agent_bio,不截断
- text += f"_简介: {self.agent_bio}_\n\n"
+ text += get_format('agent_bio').format(bio=self.agent_bio) + "\n\n"
text += f"**Q:** {self.question}\n\n"
text += f"**A:** {self.response}\n"
if self.key_quotes:
- text += "\n**关键引言:**\n"
+ text += get_format('agent_key_quotes')
for quote in self.key_quotes:
# 清理各种引号
clean_quote = quote.replace('\u201c', '').replace('\u201d', '').replace('"', '')
@@ -374,25 +375,25 @@ def to_dict(self) -> Dict[str, Any]:
def to_text(self) -> str:
"""转换为详细的文本格式,供LLM理解和报告引用"""
text_parts = [
- "## 深度采访报告",
- f"**采访主题:** {self.interview_topic}",
- f"**采访人数:** {self.interviewed_count} / {self.total_agents} 位模拟Agent",
- "\n### 采访对象选择理由",
- self.selection_reasoning or "(自动选择)",
+ get_format('interview_header'),
+ get_format('interview_topic').format(topic=self.interview_topic),
+ get_format('interview_count').format(interviewed=self.interviewed_count, total=self.total_agents),
+ get_format('interview_selection_header'),
+ self.selection_reasoning or get_format('interview_auto_selection'),
"\n---",
- "\n### 采访实录",
+ get_format('interview_records_header'),
]
if self.interviews:
for i, interview in enumerate(self.interviews, 1):
- text_parts.append(f"\n#### 采访 #{i}: {interview.agent_name}")
+ text_parts.append(get_format('interview_entry').format(index=i, name=interview.agent_name))
text_parts.append(interview.to_text())
text_parts.append("\n---")
else:
- text_parts.append("(无采访记录)\n\n---")
+ text_parts.append(get_format('interview_no_record'))
- text_parts.append("\n### 采访摘要与核心观点")
- text_parts.append(self.summary or "(无摘要)")
+ text_parts.append(get_format('interview_summary_header'))
+ text_parts.append(self.summary or get_format('interview_no_summary'))
return "\n".join(text_parts)
@@ -1101,23 +1102,15 @@ def _generate_sub_queries(
将复杂问题分解为多个可以独立检索的子问题
"""
- system_prompt = """你是一个专业的问题分析专家。你的任务是将一个复杂问题分解为多个可以在模拟世界中独立观察的子问题。
+ system_prompt = get_prompt('zep_subquery_system')
-要求:
-1. 每个子问题应该足够具体,可以在模拟世界中找到相关的Agent行为或事件
-2. 子问题应该覆盖原问题的不同维度(如:谁、什么、为什么、怎么样、何时、何地)
-3. 子问题应该与模拟场景相关
-4. 返回JSON格式:{"sub_queries": ["子问题1", "子问题2", ...]}"""
-
- user_prompt = f"""模拟需求背景:
-{simulation_requirement}
-
-{f"报告上下文:{report_context[:500]}" if report_context else ""}
-
-请将以下问题分解为{max_queries}个子问题:
-{query}
-
-返回JSON格式的子问题列表。"""
+ report_context_line = f"报告上下文:{report_context[:500]}" if report_context else ""
+ user_prompt = get_prompt('zep_subquery_user').format(
+ simulation_requirement=simulation_requirement,
+ report_context_line=report_context_line,
+ max_queries=max_queries,
+ query=query
+ )
try:
response = self.llm.chat_json(
@@ -1137,9 +1130,9 @@ def _generate_sub_queries(
# 降级:返回基于原问题的变体
return [
query,
- f"{query} 的主要参与者",
- f"{query} 的原因和影响",
- f"{query} 的发展过程"
+ get_string('subquery_main_participants', query=query),
+ get_string('subquery_causes_effects', query=query),
+ get_string('subquery_development', query=query)
][:max_queries]
def panorama_search(
@@ -1318,7 +1311,7 @@ def interview_agents(
if not profiles:
logger.warning(f"未找到模拟 {simulation_id} 的人设文件")
- result.summary = "未找到可采访的Agent人设文件"
+ result.summary = get_string('interview_no_profiles')
return result
result.total_agents = len(profiles)
@@ -1349,17 +1342,7 @@ def interview_agents(
combined_prompt = "\n".join([f"{i+1}. {q}" for i, q in enumerate(result.interview_questions)])
# 添加优化前缀,约束Agent回复格式
- INTERVIEW_PROMPT_PREFIX = (
- "你正在接受一次采访。请结合你的人设、所有的过往记忆与行动,"
- "以纯文本方式直接回答以下问题。\n"
- "回复要求:\n"
- "1. 直接用自然语言回答,不要调用任何工具\n"
- "2. 不要返回JSON格式或工具调用格式\n"
- "3. 不要使用Markdown标题(如#、##、###)\n"
- "4. 按问题编号逐一回答,每个回答以「问题X:」开头(X为问题编号)\n"
- "5. 每个问题的回答之间用空行分隔\n"
- "6. 回答要有实质内容,每个问题至少回答2-3句话\n\n"
- )
+ INTERVIEW_PROMPT_PREFIX = get_prompt('interview_prompt_prefix')
optimized_prompt = f"{INTERVIEW_PROMPT_PREFIX}{combined_prompt}"
# Step 4: 调用真实的采访API(不指定platform,默认双平台同时采访)
@@ -1389,7 +1372,7 @@ def interview_agents(
if not api_result.get("success", False):
error_msg = api_result.get("error", "未知错误")
logger.warning(f"采访API返回失败: {error_msg}")
- result.summary = f"采访API调用失败:{error_msg}。请检查OASIS模拟环境状态。"
+ result.summary = get_string('interview_api_failed', error=error_msg)
return result
# Step 5: 解析API返回结果,构建AgentInterview对象
@@ -1415,9 +1398,10 @@ def interview_agents(
reddit_response = self._clean_tool_call_response(reddit_response)
# 始终输出双平台标记
- twitter_text = twitter_response if twitter_response else "(该平台未获得回复)"
- reddit_text = reddit_response if reddit_response else "(该平台未获得回复)"
- response_text = f"【Twitter平台回答】\n{twitter_text}\n\n【Reddit平台回答】\n{reddit_text}"
+ no_response = get_format('platform_no_response')
+ twitter_text = twitter_response if twitter_response else no_response
+ reddit_text = reddit_response if reddit_response else no_response
+ response_text = f"{get_format('platform_twitter_header')}\n{twitter_text}\n\n{get_format('platform_reddit_header')}\n{reddit_text}"
# 提取关键引言(从两个平台的回答中)
import re
@@ -1462,13 +1446,13 @@ def interview_agents(
except ValueError as e:
# 模拟环境未运行
logger.warning(f"采访API调用失败(环境未运行?): {e}")
- result.summary = f"采访失败:{str(e)}。模拟环境可能已关闭,请确保OASIS环境正在运行。"
+ result.summary = get_string('interview_failed', error=str(e))
return result
except Exception as e:
logger.error(f"采访API调用异常: {e}")
import traceback
logger.error(traceback.format_exc())
- result.summary = f"采访过程发生错误:{str(e)}"
+ result.summary = get_string('interview_error', error=str(e))
return result
# Step 6: 生成采访摘要
diff --git a/frontend/src/i18n/de.json b/frontend/src/i18n/de.json
new file mode 100644
index 000000000..7617bcb96
--- /dev/null
+++ b/frontend/src/i18n/de.json
@@ -0,0 +1,252 @@
+{
+ "nav.github_link": "Besuche unsere Github-Seite",
+
+ "home.tag": "Einfache und universelle Schwarmintelligenz-Engine",
+ "home.version": "v0.1-Vorschau",
+ "home.title_line1": "Beliebige Berichte hochladen",
+ "home.title_line2": "Die Zukunft sofort simulieren",
+ "home.description": "Selbst mit nur einem Textabschnitt kann {brand} basierend auf den darin enthaltenen Realwelt-Samen automatisch eine Parallelwelt mit bis zu {agents} erzeugen. Durch das Einsetzen von Variablen aus der Vogelperspektive wird in komplexen Gruppeninteraktionen nach dem {optimum} in dynamischen Umgebungen gesucht.",
+ "home.description_brand": "MiroFish",
+ "home.description_agents": "Millionen von Agenten",
+ "home.description_optimum": "\"lokalen Optimum\"",
+ "home.slogan": "Lass die Zukunft im Agenten-Schwarm proben, lass Entscheidungen nach hundert Schlachten siegen",
+ "home.status_title": "Systemstatus",
+ "home.status_ready": "Bereit",
+ "home.status_desc": "Vorhersage-Engine im Standby-Modus. Laden Sie mehrere unstrukturierte Daten hoch, um die Simulationssequenz zu initialisieren.",
+ "home.metric_cost": "Kostengünstig",
+ "home.metric_cost_desc": "Durchschnittlich 5$/Simulation",
+ "home.metric_scale": "Hochverfügbar",
+ "home.metric_scale_desc": "Bis zu Millionen von Agenten",
+ "home.workflow_title": "Workflow-Sequenz",
+ "home.step1_title": "Graphen-Aufbau",
+ "home.step1_desc": "Realwelt-Samen-Extraktion & Einzel- und Gruppenspeicher-Injektion & GraphRAG-Aufbau",
+ "home.step2_title": "Umgebungsaufbau",
+ "home.step2_desc": "Entitäts-Beziehungsextraktion & Persona-Generierung & Umgebungskonfigurations-Agent injiziert Simulationsparameter",
+ "home.step3_title": "Simulation starten",
+ "home.step3_desc": "Parallele Dual-Plattform-Simulation & automatische Vorhersagebedarfsanalyse & dynamische Aktualisierung des Zeitspeichermodells",
+ "home.step4_title": "Berichterstellung",
+ "home.step4_desc": "ReportAgent verfügt über umfangreiche Werkzeuge für tiefgehende Interaktion mit der simulierten Umgebung",
+ "home.step5_title": "Tiefgehende Interaktion",
+ "home.step5_desc": "Gespräche mit beliebigen Personen in der simulierten Welt & Gespräche mit dem ReportAgent",
+ "home.upload_title": "Dateien hierher ziehen",
+ "home.upload_hint": "oder klicken zum Durchsuchen",
+ "home.console_seed": "Realwelt-Samen",
+ "home.console_format": "Unterstützte Formate: PDF, MD, TXT",
+ "home.console_prompt": "Simulations-Prompt",
+ "home.divider": "Eingabeparameter",
+ "home.placeholder": "// Geben Sie Simulations- oder Vorhersageanforderungen in natürlicher Sprache ein (z.B. Wenn Universität X eine Aufhebung der Strafe bekannt gibt, welche öffentliche Meinung entsteht?)",
+ "home.engine_badge": "Engine: MiroFish-V1.0",
+ "home.start_btn": "Engine starten",
+ "home.loading_btn": "Initialisierung...",
+
+ "main.view_graph": "Graph",
+ "main.view_split": "Zweispaltig",
+ "main.view_workbench": "Arbeitsbereich",
+ "main.step_prefix": "Schritt",
+ "main.step_names.0": "Graphaufbau",
+ "main.step_names.1": "Umgebungseinrichtung",
+ "main.step_names.2": "Simulation starten",
+ "main.step_names.3": "Berichterstellung",
+ "main.step_names.4": "Tiefeninteraktion",
+
+ "step1.ontology_title": "Ontologie-Generierung",
+ "step1.ontology_desc": "LLM analysiert Dokumentinhalte und Simulationsanforderungen, extrahiert Realitätskeime und generiert automatisch eine geeignete Ontologiestruktur",
+ "step1.ontology_analyzing": "Dokumente werden analysiert...",
+ "step1.ontology_entity_badge": "ENTITÄT",
+ "step1.ontology_relation_badge": "BEZIEHUNG",
+ "step1.graph_build_title": "GraphRAG-Aufbau",
+ "step1.graph_build_desc": "Basierend auf der generierten Ontologie werden Dokumente automatisch segmentiert und über Zep ein Wissensgraph aufgebaut, Entitäten und Beziehungen extrahiert sowie zeitliche Erinnerungen und Community-Zusammenfassungen erstellt",
+ "step1.stat_nodes": "Entitätsknoten",
+ "step1.stat_edges": "Beziehungskanten",
+ "step1.stat_types": "SCHEMA-Typen",
+ "step1.complete_title": "Aufbau abgeschlossen",
+ "step1.complete_desc": "Graphaufbau abgeschlossen. Bitte fahren Sie mit dem nächsten Schritt zur Simulationsumgebung fort.",
+ "step1.enter_env_setup": "Zur Umgebungseinrichtung ➝",
+ "step1.creating": "Wird erstellt...",
+ "step1.missing_info": "Fehlende Projekt- oder Graph-Informationen",
+ "step1.create_failed": "Simulation konnte nicht erstellt werden",
+ "step1.create_error": "Fehler beim Erstellen der Simulation",
+ "step1.unknown_error": "Unbekannter Fehler",
+
+ "status.completed": "Abgeschlossen",
+ "status.processing": "Generierung",
+ "status.pending": "Warten",
+ "status.in_progress": "In Bearbeitung",
+ "status.initializing": "Initialisierung",
+
+ "step2.instance_title": "Simulationsinstanz-Initialisierung",
+ "step2.instance_desc": "Neue Simulationsinstanz erstellen und Parametervorlagen der Simulationswelt abrufen",
+ "step2.task_completed": "Asynchrone Aufgabe abgeschlossen",
+ "step2.profile_title": "Agent-Persönlichkeiten generieren",
+ "step2.profile_desc": "Unter Berücksichtigung des Kontexts werden automatisch Werkzeuge aufgerufen, um Entitäten und Beziehungen aus dem Wissensgraphen zu extrahieren, simulierte Individuen zu initialisieren und ihnen basierend auf Realitätskeimen einzigartiges Verhalten und Erinnerungen zuzuweisen",
+ "step2.stat_current_agents": "Aktuelle Agent-Anzahl",
+ "step2.stat_expected_agents": "Erwartete Agent-Gesamtzahl",
+ "step2.stat_topics": "Verknüpfte Themen der Realitätskeime",
+ "step2.profiles_preview_title": "Generierte Agent-Persönlichkeiten",
+ "step2.unknown_profession": "Unbekannter Beruf",
+ "step2.no_bio": "Keine Beschreibung vorhanden",
+ "step2.config_title": "Dual-Plattform-Simulationskonfiguration",
+ "step2.config_desc": "LLM legt basierend auf Simulationsanforderungen und Realitätskeimen intelligent die Zeitgeschwindigkeit der Welt, Empfehlungsalgorithmen, aktive Zeitfenster jedes Individuums, Beitragsfrequenz, Ereignisauslöser und weitere Parameter fest",
+ "step2.sim_duration": "Simulationsdauer",
+ "step2.hours": "Stunden",
+ "step2.round_duration": "Dauer pro Runde",
+ "step2.minutes": "Minuten",
+ "step2.total_rounds": "Gesamtrunden",
+ "step2.rounds_unit": "Runden",
+ "step2.active_per_hour": "Aktiv pro Stunde",
+ "step2.peak_hours": "Spitzenzeiten",
+ "step2.work_hours": "Arbeitszeiten",
+ "step2.morning_hours": "Morgenzeiten",
+ "step2.off_peak_hours": "Nebenzeiten",
+ "step2.agent_config": "Agent-Konfiguration",
+ "step2.agent_count_unit": "Stück",
+ "step2.active_timeline": "Aktive Zeiten",
+ "step2.posts_per_hour": "Beiträge/Std",
+ "step2.comments_per_hour": "Kommentare/Std",
+ "step2.response_delay": "Antwortverzögerung",
+ "step2.activity_level": "Aktivitätsgrad",
+ "step2.sentiment_bias": "Stimmungstendenz",
+ "step2.influence": "Einfluss",
+ "step2.rec_algo_config": "Empfehlungsalgorithmus-Konfiguration",
+ "step2.platform1_name": "Plattform 1: Marktplatz / Informationsfluss",
+ "step2.platform2_name": "Plattform 2: Themen / Community",
+ "step2.recency_weight": "Aktualitätsgewicht",
+ "step2.popularity_weight": "Popularitätsgewicht",
+ "step2.relevance_weight": "Relevanzgewicht",
+ "step2.viral_threshold": "Viraler Schwellenwert",
+ "step2.echo_chamber_strength": "Echokammer-Stärke",
+ "step2.llm_reasoning": "LLM-Konfigurationsschlussfolgerung",
+ "step2.orchestration_title": "Initiale Aktivierungsorchestrierung",
+ "step2.orchestration_desc": "Basierend auf der narrativen Richtung werden automatisch initiale Aktivierungsereignisse und Trendthemen generiert, um den Ausgangszustand der Simulationswelt zu steuern",
+ "step2.orchestration_processing": "Orchestrierung",
+ "step2.narrative_direction": "Narrative Leitrichtung",
+ "step2.hot_topics": "Initiale Trendthemen",
+ "step2.initial_posts": "Initiale Aktivierungssequenz",
+ "step2.ready_title": "Vorbereitung abgeschlossen",
+ "step2.ready_desc": "Die Simulationsumgebung ist bereit. Die Simulation kann jetzt gestartet werden.",
+ "step2.rounds_setting": "Simulationsrunden-Einstellung",
+ "step2.rounds_auto_desc": "MiroFish plant automatisch eine Simulation von {hours} Stunden, wobei jede Runde {minutes} Minuten Echtzeit entspricht",
+ "step2.custom": "Benutzerdefiniert",
+ "step2.estimated_time": "Bei 100 Agents: Geschätzte Dauer ca. {minutes} Minuten",
+ "step2.estimated_time_auto": "Bei 100 Agents: Geschätzte Dauer {minutes} Minuten",
+ "step2.recommend_custom": "Beim ersten Durchlauf wird dringend empfohlen, in den 'Benutzerdefinierten Modus' zu wechseln und die Rundenzahl zu reduzieren, um eine schnelle Vorschau zu ermöglichen und Fehlerrisiken zu minimieren ➝",
+ "step2.recommend_label": "Empfohlen",
+ "step2.back_to_graph": "← Zurück zum Graphaufbau",
+ "step2.start_dual_sim": "Parallele Dual-Welt-Simulation starten ➝",
+ "step2.modal_age": "Sichtbares Alter",
+ "step2.modal_age_unit": "Jahre",
+ "step2.modal_gender": "Sichtbares Geschlecht",
+ "step2.gender_male": "Maennlich",
+ "step2.gender_female": "Weiblich",
+ "step2.gender_other": "Sonstiges",
+ "step2.modal_country": "Land/Region",
+ "step2.modal_mbti": "Sichtbarer MBTI",
+ "step2.modal_bio_label": "Persönlichkeitsbeschreibung",
+ "step2.modal_topics_label": "Mit Realitätskeimen verknüpfte Themen",
+ "step2.modal_persona_label": "Detaillierter Persönlichkeitshintergrund",
+ "step2.dim_experience": "Gesamterlebnis des Ereignisses",
+ "step2.dim_experience_desc": "Vollständige Verhaltensspur in diesem Ereignis",
+ "step2.dim_behavior": "Verhaltensmuster-Profil",
+ "step2.dim_behavior_desc": "Erfahrungszusammenfassung und Stilpräferenzen",
+ "step2.dim_memory": "Einzigartige Erinnerungsprägung",
+ "step2.dim_memory_desc": "Auf Realitätskeimen basierende Erinnerungen",
+ "step2.dim_social": "Soziales Beziehungsnetzwerk",
+ "step2.dim_social_desc": "Individuelle Verknüpfungen und Interaktionsgraph",
+
+ "step3.platform_twitter": "Info Plaza",
+ "step3.platform_reddit": "Topic Community",
+ "step3.stat_round": "ROUND",
+ "step3.stat_elapsed": "Verstrichene Zeit",
+ "step3.stat_acts": "ACTS",
+ "step3.available_actions": "Verfügbare Aktionen",
+ "step3.generate_report_btn": "Ergebnisbericht generieren",
+ "step3.generating_btn": "Wird gestartet...",
+ "step3.total_events": "TOTAL EVENTS",
+ "step3.waiting_actions": "Warte auf Agent-Aktionen...",
+ "step3.log_title": "SIMULATION MONITOR",
+ "step3.error_no_sim_id": "Fehler: simulationId fehlt",
+ "step3.starting_sim": "Parallele Dual-Plattform-Simulation wird gestartet...",
+ "step3.graph_memory_enabled": "Dynamischer Graph-Aktualisierungsmodus aktiviert",
+ "step3.sim_started": "Simulations-Engine erfolgreich gestartet",
+ "step3.start_failed": "Start fehlgeschlagen",
+ "step3.start_error": "Startfehler",
+ "step3.unknown_error": "Unbekannter Fehler",
+ "step3.stopping_sim": "Simulation wird gestoppt...",
+ "step3.sim_stopped": "Simulation gestoppt",
+ "step3.stop_failed": "Stopp fehlgeschlagen",
+ "step3.stop_error": "Stoppfehler",
+ "step3.platforms_completed": "Alle Plattform-Simulationen beendet erkannt",
+ "step3.sim_completed": "Simulation abgeschlossen",
+ "step3.set_max_rounds": "Maximale Simulationsrunden gesetzt: {rounds}",
+ "step3.force_restarted": "Alte Simulationsprotokolle bereinigt, Simulation neu gestartet",
+
+ "step4.report_tag": "Prognosebericht",
+ "step4.loading_section": "{title} wird generiert...",
+ "step4.waiting_agent": "Warte auf Berichts-Agent...",
+ "step4.metric_sections": "Abschnitte",
+ "step4.metric_elapsed": "Verstrichene Zeit",
+ "step4.metric_tools": "Werkzeuge",
+ "step4.enter_interaction": "Zur Tiefeninteraktion",
+ "step4.log_title": "CONSOLE OUTPUT",
+
+ "step5.report_tag": "Prognosebericht",
+ "step5.loading_section": "{title} wird erstellt...",
+ "step5.waiting_agent": "Warte auf Berichts-Agent...",
+ "step5.interactive_tools": "Interaktive Werkzeuge",
+ "step5.agents_available": "{count} Agents verfügbar",
+ "step5.chat_report_agent": "Mit Report Agent chatten",
+ "step5.chat_any_agent": "Mit beliebigem Individuum der Welt chatten",
+ "step5.select_agent": "Gesprächspartner wählen",
+ "step5.send_survey": "Umfrage in die Welt senden",
+ "step5.report_agent_name": "Report Agent - Chat",
+ "step5.report_agent_desc": "Schnelle Chat-Version des Berichtsgenerierungs-Agenten mit 4 professionellen Werkzeugen und dem vollständigen MiroFish-Gedächtnis",
+ "step5.tool_insight_name": "InsightForge Tiefenattribution",
+ "step5.tool_insight_desc": "Abgleich von Realitätskeim-Daten mit dem Zustand der Simulationsumgebung, kombiniert mit Global/Local Memory-Mechanismus für zeitübergreifende Tiefenattributionsanalyse",
+ "step5.tool_panorama_name": "PanoramaSearch Panorama-Verfolgung",
+ "step5.tool_panorama_desc": "Breitensuche-Algorithmus basierend auf Graphstruktur, rekonstruiert Ereignisverbreitungspfade und erfasst die Topologie des gesamten Informationsflusses",
+ "step5.tool_quick_name": "QuickSearch Schnellsuche",
+ "step5.tool_quick_desc": "GraphRAG-basierte Sofortabfrage-Schnittstelle mit optimierter Indexeffizienz zur schnellen Extraktion konkreter Knotenattribute und diskreter Fakten",
+ "step5.tool_interview_name": "InterviewSubAgent Virtuelle Interviews",
+ "step5.tool_interview_desc": "Autonome Interviews mit parallelen Mehrrunden-Dialogen mit Individuen der Simulationswelt zur Erfassung unstrukturierter Meinungsdaten und psychologischer Zustände",
+ "step5.profile_bio_label": "Beschreibung",
+ "step5.chat_empty_report": "Mit dem Report Agent chatten und den Berichtsinhalt vertiefen",
+ "step5.chat_empty_agent": "Mit simulierten Individuen chatten und ihre Ansichten kennenlernen",
+ "step5.chat_placeholder": "Geben Sie Ihre Frage ein...",
+ "step5.survey_select_title": "Umfrageteilnehmer wählen",
+ "step5.survey_selected_count": "Ausgewählt {selected} / {total}",
+ "step5.survey_question_title": "Umfragefragen",
+ "step5.survey_question_placeholder": "Geben Sie die Frage ein, die Sie allen ausgewählten Teilnehmern stellen möchten...",
+ "step5.survey_submit": "Umfrage senden",
+ "step5.select_all": "Alle auswählen",
+ "step5.clear_all": "Leeren",
+ "step5.survey_results_title": "Umfrageergebnisse",
+ "step5.survey_results_count": "{count} Antworten",
+
+ "graph.panel_title": "Graph-Beziehungsvisualisierung",
+ "graph.refresh": "Aktualisieren",
+ "graph.refresh_tooltip": "Graph aktualisieren",
+ "graph.maximize_tooltip": "Maximieren/Wiederherstellen",
+ "graph.building_hint": "Echtzeit-Aktualisierung...",
+ "graph.simulating_hint": "GraphRAG Langzeit-/Kurzzeitgedächtnis wird in Echtzeit aktualisiert",
+ "graph.finished_hint": "Es werden noch wenige Inhalte verarbeitet. Es wird empfohlen, den Graphen später manuell zu aktualisieren.",
+ "graph.close_hint": "Hinweis schließen",
+ "graph.node_details": "Knotendetails",
+ "graph.relationship": "Beziehung",
+
+ "history.title": "Simulationsprotokoll",
+ "history.graph_build": "Graphaufbau",
+ "history.env_setup": "Umgebungseinrichtung",
+ "history.report": "Analysebericht",
+ "history.more_files": "+{count} Dateien",
+ "history.no_files": "Keine Dateien",
+ "history.loading": "Wird geladen...",
+ "history.modal_requirement": "Simulationsanforderung",
+ "history.modal_none": "Keine",
+ "history.modal_files": "Verknuepfte Dateien",
+ "history.modal_no_files": "Keine verknuepften Dateien",
+ "history.modal_divider": "Simulationswiedergabe",
+ "history.modal_step1": "Graphaufbau",
+ "history.modal_step2": "Umgebungseinrichtung",
+ "history.modal_step4": "Analysebericht",
+ "history.playback_hint": "Step3 \"Simulation starten\" und Step5 \"Tiefeninteraktion\" müssen während der Ausführung gestartet werden und unterstützen keine historische Wiedergabe"
+}
diff --git a/frontend/src/i18n/en.json b/frontend/src/i18n/en.json
new file mode 100644
index 000000000..2d74d27b5
--- /dev/null
+++ b/frontend/src/i18n/en.json
@@ -0,0 +1,252 @@
+{
+ "nav.github_link": "Visit our Github page",
+
+ "home.tag": "Simple and Universal Swarm Intelligence Engine",
+ "home.version": "v0.1-Preview",
+ "home.title_line1": "Upload any report",
+ "home.title_line2": "Simulate the future instantly",
+ "home.description": "Even with just a single paragraph, {brand} can automatically generate a parallel world with up to {agents} based on the real-world seeds within. By injecting variables from a god's-eye view, it searches for the {optimum} in complex group interactions within dynamic environments.",
+ "home.description_brand": "MiroFish",
+ "home.description_agents": "millions of Agents",
+ "home.description_optimum": "\"local optimum\"",
+ "home.slogan": "Let the future rehearse in agent swarms, let decisions prevail after a hundred battles",
+ "home.status_title": "System Status",
+ "home.status_ready": "Ready",
+ "home.status_desc": "Prediction engine on standby. Upload multiple unstructured data files to initialize the simulation sequence.",
+ "home.metric_cost": "Low Cost",
+ "home.metric_cost_desc": "Average $5/simulation",
+ "home.metric_scale": "High Availability",
+ "home.metric_scale_desc": "Up to millions of Agents",
+ "home.workflow_title": "Workflow Sequence",
+ "home.step1_title": "Graph Building",
+ "home.step1_desc": "Real-world seed extraction & individual and group memory injection & GraphRAG construction",
+ "home.step2_title": "Environment Setup",
+ "home.step2_desc": "Entity-relationship extraction & persona generation & environment configuration agent injects simulation parameters",
+ "home.step3_title": "Start Simulation",
+ "home.step3_desc": "Dual-platform parallel simulation & automatic prediction demand parsing & dynamic temporal memory updates",
+ "home.step4_title": "Report Generation",
+ "home.step4_desc": "ReportAgent has a rich toolkit for deep interaction with the post-simulation environment",
+ "home.step5_title": "Deep Interaction",
+ "home.step5_desc": "Converse with any individual in the simulated world & chat with the ReportAgent",
+ "home.upload_title": "Drag files to upload",
+ "home.upload_hint": "or click to browse files",
+ "home.console_seed": "Real-World Seed",
+ "home.console_format": "Supported formats: PDF, MD, TXT",
+ "home.console_prompt": "Simulation Prompt",
+ "home.divider": "Input Parameters",
+ "home.placeholder": "// Enter simulation or prediction requirements in natural language (e.g. If University X announces a reversal of the disciplinary action, what public opinion trends will emerge?)",
+ "home.engine_badge": "Engine: MiroFish-V1.0",
+ "home.start_btn": "Start Engine",
+ "home.loading_btn": "Initializing...",
+
+ "main.view_graph": "Graph",
+ "main.view_split": "Split",
+ "main.view_workbench": "Workbench",
+ "main.step_prefix": "Step",
+ "main.step_names.0": "Graph Building",
+ "main.step_names.1": "Environment Setup",
+ "main.step_names.2": "Start Simulation",
+ "main.step_names.3": "Report Generation",
+ "main.step_names.4": "Deep Interaction",
+
+ "step1.ontology_title": "Ontology Generation",
+ "step1.ontology_desc": "LLM analyzes document content and simulation requirements, extracts real-world seeds, and automatically generates an appropriate ontology structure",
+ "step1.ontology_analyzing": "Analyzing documents...",
+ "step1.ontology_entity_badge": "ENTITY",
+ "step1.ontology_relation_badge": "RELATION",
+ "step1.graph_build_title": "GraphRAG Construction",
+ "step1.graph_build_desc": "Based on the generated ontology, documents are automatically chunked and processed via Zep to build a knowledge graph, extracting entities and relationships while forming temporal memories and community summaries",
+ "step1.stat_nodes": "Entity Nodes",
+ "step1.stat_edges": "Relationship Edges",
+ "step1.stat_types": "Schema Types",
+ "step1.complete_title": "Build Complete",
+ "step1.complete_desc": "Graph construction is complete. Please proceed to the next step for simulation environment setup.",
+ "step1.enter_env_setup": "Enter Environment Setup ➝",
+ "step1.creating": "Creating...",
+ "step1.missing_info": "Missing project or graph information",
+ "step1.create_failed": "Failed to create simulation",
+ "step1.create_error": "Error creating simulation",
+ "step1.unknown_error": "Unknown error",
+
+ "status.completed": "Completed",
+ "status.processing": "Processing",
+ "status.pending": "Pending",
+ "status.in_progress": "In Progress",
+ "status.initializing": "Initializing",
+
+ "step2.instance_title": "Simulation Instance Initialization",
+ "step2.instance_desc": "Create a new simulation instance and fetch simulation world parameter templates",
+ "step2.task_completed": "Async task completed",
+ "step2.profile_title": "Generate Agent Personas",
+ "step2.profile_desc": "Using context, automatically calls tools to extract entities and relationships from the knowledge graph, initializes simulated individuals, and assigns them unique behaviors and memories based on real-world seeds",
+ "step2.stat_current_agents": "Current Agents",
+ "step2.stat_expected_agents": "Expected Total Agents",
+ "step2.stat_topics": "Linked Topics from Real-World Seeds",
+ "step2.profiles_preview_title": "Generated Agent Personas",
+ "step2.unknown_profession": "Unknown profession",
+ "step2.no_bio": "No description available",
+ "step2.config_title": "Dual-Platform Simulation Configuration",
+ "step2.config_desc": "LLM intelligently configures world time flow, recommendation algorithms, each individual's active time slots, posting frequency, event triggers and other parameters based on simulation requirements and real-world seeds",
+ "step2.sim_duration": "Simulation Duration",
+ "step2.hours": "hours",
+ "step2.round_duration": "Duration per Round",
+ "step2.minutes": "minutes",
+ "step2.total_rounds": "Total Rounds",
+ "step2.rounds_unit": "rounds",
+ "step2.active_per_hour": "Active per Hour",
+ "step2.peak_hours": "Peak Hours",
+ "step2.work_hours": "Work Hours",
+ "step2.morning_hours": "Morning Hours",
+ "step2.off_peak_hours": "Off-Peak Hours",
+ "step2.agent_config": "Agent Configuration",
+ "step2.agent_count_unit": "agents",
+ "step2.active_timeline": "Active Hours",
+ "step2.posts_per_hour": "Posts/hr",
+ "step2.comments_per_hour": "Comments/hr",
+ "step2.response_delay": "Response Delay",
+ "step2.activity_level": "Activity Level",
+ "step2.sentiment_bias": "Sentiment Bias",
+ "step2.influence": "Influence",
+ "step2.rec_algo_config": "Recommendation Algorithm Configuration",
+ "step2.platform1_name": "Platform 1: Plaza / Feed",
+ "step2.platform2_name": "Platform 2: Topics / Community",
+ "step2.recency_weight": "Recency Weight",
+ "step2.popularity_weight": "Popularity Weight",
+ "step2.relevance_weight": "Relevance Weight",
+ "step2.viral_threshold": "Viral Threshold",
+ "step2.echo_chamber_strength": "Echo Chamber Strength",
+ "step2.llm_reasoning": "LLM Configuration Reasoning",
+ "step2.orchestration_title": "Initial Activation Orchestration",
+ "step2.orchestration_desc": "Based on narrative direction, automatically generates initial activation events and trending topics to guide the simulated world's initial state",
+ "step2.orchestration_processing": "Orchestrating",
+ "step2.narrative_direction": "Narrative Guidance Direction",
+ "step2.hot_topics": "Initial Trending Topics",
+ "step2.initial_posts": "Initial Activation Sequence",
+ "step2.ready_title": "Preparation Complete",
+ "step2.ready_desc": "The simulation environment is ready. You can now start running the simulation.",
+ "step2.rounds_setting": "Simulation Rounds Setting",
+ "step2.rounds_auto_desc": "MiroFish automatically plans a simulation of {hours} hours of real-world time, with each round representing {minutes} minutes of elapsed time",
+ "step2.custom": "Custom",
+ "step2.estimated_time": "With 100 Agents: Estimated duration approx. {minutes} minutes",
+ "step2.estimated_time_auto": "With 100 Agents: Estimated duration {minutes} minutes",
+ "step2.recommend_custom": "For your first run, it is strongly recommended to switch to 'Custom Mode' and reduce the number of rounds for a quick preview and lower error risk ➝",
+ "step2.recommend_label": "Recommended",
+ "step2.back_to_graph": "← Back to Graph Building",
+ "step2.start_dual_sim": "Start Dual-World Parallel Simulation ➝",
+ "step2.modal_age": "Apparent Age",
+ "step2.modal_age_unit": "years",
+ "step2.modal_gender": "Apparent Gender",
+ "step2.gender_male": "Male",
+ "step2.gender_female": "Female",
+ "step2.gender_other": "Other",
+ "step2.modal_country": "Country/Region",
+ "step2.modal_mbti": "Apparent MBTI",
+ "step2.modal_bio_label": "Persona Description",
+ "step2.modal_topics_label": "Topics Linked to Real-World Seeds",
+ "step2.modal_persona_label": "Detailed Persona Background",
+ "step2.dim_experience": "Full Event Experience",
+ "step2.dim_experience_desc": "Complete behavioral trajectory in this event",
+ "step2.dim_behavior": "Behavioral Pattern Profile",
+ "step2.dim_behavior_desc": "Experience summary and behavioral style preferences",
+ "step2.dim_memory": "Unique Memory Imprint",
+ "step2.dim_memory_desc": "Memories formed from real-world seeds",
+ "step2.dim_social": "Social Relationship Network",
+ "step2.dim_social_desc": "Individual connections and interaction graph",
+
+ "step3.platform_twitter": "Info Plaza",
+ "step3.platform_reddit": "Topic Community",
+ "step3.stat_round": "ROUND",
+ "step3.stat_elapsed": "Elapsed Time",
+ "step3.stat_acts": "ACTS",
+ "step3.available_actions": "Available Actions",
+ "step3.generate_report_btn": "Start Generating Result Report",
+ "step3.generating_btn": "Starting...",
+ "step3.total_events": "TOTAL EVENTS",
+ "step3.waiting_actions": "Waiting for agent actions...",
+ "step3.log_title": "SIMULATION MONITOR",
+ "step3.error_no_sim_id": "Error: missing simulationId",
+ "step3.starting_sim": "Starting dual-platform parallel simulation...",
+ "step3.graph_memory_enabled": "Dynamic graph update mode enabled",
+ "step3.sim_started": "Simulation engine started successfully",
+ "step3.start_failed": "Start failed",
+ "step3.start_error": "Start error",
+ "step3.unknown_error": "Unknown error",
+ "step3.stopping_sim": "Stopping simulation...",
+ "step3.sim_stopped": "Simulation stopped",
+ "step3.stop_failed": "Stop failed",
+ "step3.stop_error": "Stop error",
+ "step3.platforms_completed": "Detected that all platform simulations have finished",
+ "step3.sim_completed": "Simulation completed",
+ "step3.set_max_rounds": "Maximum simulation rounds set: {rounds}",
+ "step3.force_restarted": "Old simulation logs cleaned up, simulation restarted",
+
+ "step4.report_tag": "Prediction Report",
+ "step4.loading_section": "Generating {title}...",
+ "step4.waiting_agent": "Waiting for Report Agent...",
+ "step4.metric_sections": "Sections",
+ "step4.metric_elapsed": "Elapsed",
+ "step4.metric_tools": "Tools",
+ "step4.enter_interaction": "Enter Deep Interaction",
+ "step4.log_title": "CONSOLE OUTPUT",
+
+ "step5.report_tag": "Prediction Report",
+ "step5.loading_section": "Generating {title}...",
+ "step5.waiting_agent": "Waiting for Report Agent...",
+ "step5.interactive_tools": "Interactive Tools",
+ "step5.agents_available": "{count} agents available",
+ "step5.chat_report_agent": "Chat with Report Agent",
+ "step5.chat_any_agent": "Chat with any individual in the world",
+ "step5.select_agent": "Select conversation partner",
+ "step5.send_survey": "Send survey into the world",
+ "step5.report_agent_name": "Report Agent - Chat",
+ "step5.report_agent_desc": "Quick chat version of the report generation agent, with access to 4 professional tools and full MiroFish memory",
+ "step5.tool_insight_name": "InsightForge Deep Attribution",
+ "step5.tool_insight_desc": "Aligns real-world seed data with simulation environment state, combined with Global/Local Memory mechanisms for cross-temporal deep attribution analysis",
+ "step5.tool_panorama_name": "PanoramaSearch Panoramic Tracking",
+ "step5.tool_panorama_desc": "Graph-based breadth-first traversal algorithm that reconstructs event propagation paths and captures the topological structure of complete information flow",
+ "step5.tool_quick_name": "QuickSearch Fast Retrieval",
+ "step5.tool_quick_desc": "GraphRAG-based instant query interface with optimized index efficiency for rapid extraction of specific node attributes and discrete facts",
+ "step5.tool_interview_name": "InterviewSubAgent Virtual Interview",
+ "step5.tool_interview_desc": "Autonomous interviewing that conducts parallel multi-round dialogues with individuals in the simulated world, collecting unstructured opinion data and psychological states",
+ "step5.profile_bio_label": "Bio",
+ "step5.chat_empty_report": "Chat with Report Agent to gain deeper insights into the report content",
+ "step5.chat_empty_agent": "Chat with simulated individuals to understand their perspectives",
+ "step5.chat_placeholder": "Enter your question...",
+ "step5.survey_select_title": "Select Survey Respondents",
+ "step5.survey_selected_count": "Selected {selected} / {total}",
+ "step5.survey_question_title": "Survey Questions",
+ "step5.survey_question_placeholder": "Enter the question you want to ask all selected respondents...",
+ "step5.survey_submit": "Send Survey",
+ "step5.select_all": "Select All",
+ "step5.clear_all": "Clear",
+ "step5.survey_results_title": "Survey Results",
+ "step5.survey_results_count": "{count} responses",
+
+ "graph.panel_title": "Graph Relationship Visualization",
+ "graph.refresh": "Refresh",
+ "graph.refresh_tooltip": "Refresh graph",
+ "graph.maximize_tooltip": "Maximize/Restore",
+ "graph.building_hint": "Updating in real-time...",
+ "graph.simulating_hint": "GraphRAG long/short-term memory updating in real-time",
+ "graph.finished_hint": "Some content is still being processed. We recommend manually refreshing the graph later.",
+ "graph.close_hint": "Close hint",
+ "graph.node_details": "Node Details",
+ "graph.relationship": "Relationship",
+
+ "history.title": "Simulation Records",
+ "history.graph_build": "Graph Building",
+ "history.env_setup": "Environment Setup",
+ "history.report": "Analysis Report",
+ "history.more_files": "+{count} files",
+ "history.no_files": "No files",
+ "history.loading": "Loading...",
+ "history.modal_requirement": "Simulation Requirement",
+ "history.modal_none": "None",
+ "history.modal_files": "Associated Files",
+ "history.modal_no_files": "No associated files",
+ "history.modal_divider": "Simulation Playback",
+ "history.modal_step1": "Graph Building",
+ "history.modal_step2": "Environment Setup",
+ "history.modal_step4": "Analysis Report",
+ "history.playback_hint": "Step 3 \"Start Simulation\" and Step 5 \"Deep Interaction\" must be started during runtime and do not support historical playback"
+}
diff --git a/frontend/src/i18n/index.js b/frontend/src/i18n/index.js
new file mode 100644
index 000000000..fabae870d
--- /dev/null
+++ b/frontend/src/i18n/index.js
@@ -0,0 +1,26 @@
+import zh from './zh.json'
+import en from './en.json'
+import de from './de.json'
+
+const locales = { zh, en, de } // bundled message catalogs, keyed by language code
+let currentLang = 'zh' // active language; Chinese is the module default
+
+export function setLanguage(lang) {
+  if (locales[lang]) currentLang = lang // silently ignore unknown language codes
+}
+
+export function t(key) {
+  return locales[currentLang]?.[key] || locales.zh?.[key] || key // fall back to zh, then to the raw key
+}
+
+export async function initLanguage() {
+  try {
+    const res = await fetch('/api/config/language') // backend-configured language (LANGUAGE env var)
+    const data = await res.json()
+    if (data.language) setLanguage(data.language)
+  } catch (e) {
+    // Default to Chinese
+  }
+}
+
+export function getLanguage() { return currentLang }
diff --git a/frontend/src/i18n/zh.json b/frontend/src/i18n/zh.json
new file mode 100644
index 000000000..05de0fc45
--- /dev/null
+++ b/frontend/src/i18n/zh.json
@@ -0,0 +1,252 @@
+{
+ "nav.github_link": "访问我们的Github主页",
+
+ "home.tag": "简洁通用的群体智能引擎",
+ "home.version": "v0.1-预览版",
+ "home.title_line1": "上传任意报告",
+ "home.title_line2": "即刻推演未来",
+ "home.description": "即使只有一段文字,{brand} 也能基于其中的现实种子,全自动生成与之对应的至多{agents}构成的平行世界。通过上帝视角注入变量,在复杂的群体交互中寻找动态环境下的{optimum}",
+ "home.description_brand": "MiroFish",
+ "home.description_agents": "百万级Agent",
+ "home.description_optimum": "\"局部最优解\"",
+ "home.slogan": "让未来在 Agent 群中预演,让决策在百战后胜出",
+ "home.status_title": "系统状态",
+ "home.status_ready": "准备就绪",
+ "home.status_desc": "预测引擎待命中,可上传多份非结构化数据以初始化模拟序列",
+ "home.metric_cost": "低成本",
+ "home.metric_cost_desc": "常规模拟平均5$/次",
+ "home.metric_scale": "高可用",
+ "home.metric_scale_desc": "最多百万级Agent模拟",
+ "home.workflow_title": "工作流序列",
+ "home.step1_title": "图谱构建",
+ "home.step1_desc": "现实种子提取 & 个体与群体记忆注入 & GraphRAG构建",
+ "home.step2_title": "环境搭建",
+ "home.step2_desc": "实体关系抽取 & 人设生成 & 环境配置Agent注入仿真参数",
+ "home.step3_title": "开始模拟",
+ "home.step3_desc": "双平台并行模拟 & 自动解析预测需求 & 动态更新时序记忆",
+ "home.step4_title": "报告生成",
+ "home.step4_desc": "ReportAgent拥有丰富的工具集与模拟后环境进行深度交互",
+ "home.step5_title": "深度互动",
+ "home.step5_desc": "与模拟世界中的任意一位进行对话 & 与ReportAgent进行对话",
+ "home.upload_title": "拖拽文件上传",
+ "home.upload_hint": "或点击浏览文件系统",
+ "home.console_seed": "现实种子",
+ "home.console_format": "支持格式: PDF, MD, TXT",
+ "home.console_prompt": "模拟提示词",
+ "home.divider": "输入参数",
+ "home.placeholder": "// 用自然语言输入模拟或预测需求(例.武大若发布撤销肖某处分的公告,会引发什么舆情走向)",
+ "home.engine_badge": "引擎: MiroFish-V1.0",
+ "home.start_btn": "启动引擎",
+ "home.loading_btn": "初始化中...",
+
+ "main.view_graph": "图谱",
+ "main.view_split": "双栏",
+ "main.view_workbench": "工作台",
+ "main.step_prefix": "Step",
+ "main.step_names.0": "图谱构建",
+ "main.step_names.1": "环境搭建",
+ "main.step_names.2": "开始模拟",
+ "main.step_names.3": "报告生成",
+ "main.step_names.4": "深度互动",
+
+ "step1.ontology_title": "本体生成",
+ "step1.ontology_desc": "LLM分析文档内容与模拟需求,提取出现实种子,自动生成合适的本体结构",
+ "step1.ontology_analyzing": "正在分析文档...",
+ "step1.ontology_entity_badge": "ENTITY",
+ "step1.ontology_relation_badge": "RELATION",
+ "step1.graph_build_title": "GraphRAG构建",
+ "step1.graph_build_desc": "基于生成的本体,将文档自动分块后调用 Zep 构建知识图谱,提取实体和关系,并形成时序记忆与社区摘要",
+ "step1.stat_nodes": "实体节点",
+ "step1.stat_edges": "关系边",
+ "step1.stat_types": "SCHEMA类型",
+ "step1.complete_title": "构建完成",
+ "step1.complete_desc": "图谱构建已完成,请进入下一步进行模拟环境搭建",
+ "step1.enter_env_setup": "进入环境搭建 ➝",
+ "step1.creating": "创建中...",
+ "step1.missing_info": "缺少项目或图谱信息",
+ "step1.create_failed": "创建模拟失败",
+ "step1.create_error": "创建模拟异常",
+ "step1.unknown_error": "未知错误",
+
+ "status.completed": "已完成",
+ "status.processing": "生成中",
+ "status.pending": "等待",
+ "status.in_progress": "进行中",
+ "status.initializing": "初始化",
+
+ "step2.instance_title": "模拟实例初始化",
+ "step2.instance_desc": "新建simulation实例,拉取模拟世界参数模版",
+ "step2.task_completed": "异步任务已完成",
+ "step2.profile_title": "生成 Agent 人设",
+ "step2.profile_desc": "结合上下文,自动调用工具从知识图谱梳理实体与关系,初始化模拟个体,并基于现实种子赋予他们独特的行为与记忆",
+ "step2.stat_current_agents": "当前Agent数",
+ "step2.stat_expected_agents": "预期Agent总数",
+ "step2.stat_topics": "现实种子当前关联话题数",
+ "step2.profiles_preview_title": "已生成的 Agent 人设",
+ "step2.unknown_profession": "未知职业",
+ "step2.no_bio": "暂无简介",
+ "step2.config_title": "生成双平台模拟配置",
+ "step2.config_desc": "LLM 根据模拟需求与现实种子,智能设置世界时间流速、推荐算法、每个个体的活跃时间段、发言频率、事件触发等参数",
+ "step2.sim_duration": "模拟时长",
+ "step2.hours": "小时",
+ "step2.round_duration": "每轮时长",
+ "step2.minutes": "分钟",
+ "step2.total_rounds": "总轮次",
+ "step2.rounds_unit": "轮",
+ "step2.active_per_hour": "每小时活跃",
+ "step2.peak_hours": "高峰时段",
+ "step2.work_hours": "工作时段",
+ "step2.morning_hours": "早间时段",
+ "step2.off_peak_hours": "低谷时段",
+ "step2.agent_config": "Agent 配置",
+ "step2.agent_count_unit": "个",
+ "step2.active_timeline": "活跃时段",
+ "step2.posts_per_hour": "发帖/时",
+ "step2.comments_per_hour": "评论/时",
+ "step2.response_delay": "响应延迟",
+ "step2.activity_level": "活跃度",
+ "step2.sentiment_bias": "情感倾向",
+ "step2.influence": "影响力",
+ "step2.rec_algo_config": "推荐算法配置",
+ "step2.platform1_name": "平台 1:广场 / 信息流",
+ "step2.platform2_name": "平台 2:话题 / 社区",
+ "step2.recency_weight": "时效权重",
+ "step2.popularity_weight": "热度权重",
+ "step2.relevance_weight": "相关性权重",
+ "step2.viral_threshold": "病毒阈值",
+ "step2.echo_chamber_strength": "回音室强度",
+ "step2.llm_reasoning": "LLM 配置推理",
+ "step2.orchestration_title": "初始激活编排",
+ "step2.orchestration_desc": "基于叙事方向,自动生成初始激活事件与热点话题,引导模拟世界的初始状态",
+ "step2.orchestration_processing": "编排中",
+ "step2.narrative_direction": "叙事引导方向",
+ "step2.hot_topics": "初始热点话题",
+ "step2.initial_posts": "初始激活序列",
+ "step2.ready_title": "准备完成",
+ "step2.ready_desc": "模拟环境已准备完成,可以开始运行模拟",
+ "step2.rounds_setting": "模拟轮数设定",
+ "step2.rounds_auto_desc": "MiroFish 自动规划推演现实 {hours} 小时,每轮代表现实 {minutes} 分钟时间流逝",
+ "step2.custom": "自定义",
+ "step2.estimated_time": "若Agent规模为100:预计耗时约 {minutes} 分钟",
+ "step2.estimated_time_auto": "若Agent规模为100:预计耗时 {minutes} 分钟",
+ "step2.recommend_custom": "若首次运行,强烈建议切换至'自定义模式'减少模拟轮数,以便快速预览效果并降低报错风险 ➝",
+ "step2.recommend_label": "推荐",
+ "step2.back_to_graph": "← 返回图谱构建",
+ "step2.start_dual_sim": "开始双世界并行模拟 ➝",
+ "step2.modal_age": "事件外显年龄",
+ "step2.modal_age_unit": "岁",
+ "step2.modal_gender": "事件外显性别",
+ "step2.gender_male": "男",
+ "step2.gender_female": "女",
+ "step2.gender_other": "其他",
+ "step2.modal_country": "国家/地区",
+ "step2.modal_mbti": "事件外显MBTI",
+ "step2.modal_bio_label": "人设简介",
+ "step2.modal_topics_label": "现实种子关联话题",
+ "step2.modal_persona_label": "详细人设背景",
+ "step2.dim_experience": "事件全景经历",
+ "step2.dim_experience_desc": "在此事件中的完整行为轨迹",
+ "step2.dim_behavior": "行为模式侧写",
+ "step2.dim_behavior_desc": "经验总结与行事风格偏好",
+ "step2.dim_memory": "独特记忆印记",
+ "step2.dim_memory_desc": "基于现实种子形成的记忆",
+ "step2.dim_social": "社会关系网络",
+ "step2.dim_social_desc": "个体链接与交互图谱",
+
+ "step3.platform_twitter": "Info Plaza",
+ "step3.platform_reddit": "Topic Community",
+ "step3.stat_round": "ROUND",
+ "step3.stat_elapsed": "Elapsed Time",
+ "step3.stat_acts": "ACTS",
+ "step3.available_actions": "Available Actions",
+ "step3.generate_report_btn": "开始生成结果报告",
+ "step3.generating_btn": "启动中...",
+ "step3.total_events": "TOTAL EVENTS",
+ "step3.waiting_actions": "Waiting for agent actions...",
+ "step3.log_title": "SIMULATION MONITOR",
+ "step3.error_no_sim_id": "错误:缺少 simulationId",
+ "step3.starting_sim": "正在启动双平台并行模拟...",
+ "step3.graph_memory_enabled": "已开启动态图谱更新模式",
+ "step3.sim_started": "模拟引擎启动成功",
+ "step3.start_failed": "启动失败",
+ "step3.start_error": "启动异常",
+ "step3.unknown_error": "未知错误",
+ "step3.stopping_sim": "正在停止模拟...",
+ "step3.sim_stopped": "模拟已停止",
+ "step3.stop_failed": "停止失败",
+ "step3.stop_error": "停止异常",
+ "step3.platforms_completed": "检测到所有平台模拟已结束",
+ "step3.sim_completed": "模拟已完成",
+ "step3.set_max_rounds": "设置最大模拟轮数: {rounds}",
+ "step3.force_restarted": "已清理旧的模拟日志,重新开始模拟",
+
+ "step4.report_tag": "Prediction Report",
+ "step4.loading_section": "正在生成{title}...",
+ "step4.waiting_agent": "Waiting for Report Agent...",
+ "step4.metric_sections": "Sections",
+ "step4.metric_elapsed": "Elapsed",
+ "step4.metric_tools": "Tools",
+ "step4.enter_interaction": "进入深度互动",
+ "step4.log_title": "CONSOLE OUTPUT",
+
+ "step5.report_tag": "Prediction Report",
+ "step5.loading_section": "正在生成{title}...",
+ "step5.waiting_agent": "Waiting for Report Agent...",
+ "step5.interactive_tools": "Interactive Tools",
+ "step5.agents_available": "{count} agents available",
+ "step5.chat_report_agent": "与Report Agent对话",
+ "step5.chat_any_agent": "与世界中任意个体对话",
+ "step5.select_agent": "选择对话对象",
+ "step5.send_survey": "发送问卷调查到世界中",
+ "step5.report_agent_name": "Report Agent - Chat",
+ "step5.report_agent_desc": "报告生成智能体的快速对话版本,可调用 4 种专业工具,拥有MiroFish的完整记忆",
+ "step5.tool_insight_name": "InsightForge 深度归因",
+ "step5.tool_insight_desc": "对齐现实世界种子数据与模拟环境状态,结合Global/Local Memory机制,提供跨时空的深度归因分析",
+ "step5.tool_panorama_name": "PanoramaSearch 全景追踪",
+ "step5.tool_panorama_desc": "基于图结构的广度遍历算法,重构事件传播路径,捕获全量信息流动的拓扑结构",
+ "step5.tool_quick_name": "QuickSearch 快速检索",
+ "step5.tool_quick_desc": "基于 GraphRAG 的即时查询接口,优化索引效率,用于快速提取具体的节点属性与离散事实",
+ "step5.tool_interview_name": "InterviewSubAgent 虚拟访谈",
+ "step5.tool_interview_desc": "自主式访谈,能够并行与模拟世界中个体进行多轮对话,采集非结构化的观点数据与心理状态",
+ "step5.profile_bio_label": "简介",
+ "step5.chat_empty_report": "与 Report Agent 对话,深入了解报告内容",
+ "step5.chat_empty_agent": "与模拟个体对话,了解他们的观点",
+ "step5.chat_placeholder": "输入您的问题...",
+ "step5.survey_select_title": "选择调查对象",
+ "step5.survey_selected_count": "已选 {selected} / {total}",
+ "step5.survey_question_title": "问卷问题",
+ "step5.survey_question_placeholder": "输入您想问所有被选中对象的问题...",
+ "step5.survey_submit": "发送问卷",
+ "step5.select_all": "全选",
+ "step5.clear_all": "清空",
+ "step5.survey_results_title": "调查结果",
+ "step5.survey_results_count": "{count} 条回复",
+
+ "graph.panel_title": "Graph Relationship Visualization",
+ "graph.refresh": "Refresh",
+ "graph.refresh_tooltip": "刷新图谱",
+ "graph.maximize_tooltip": "最大化/还原",
+ "graph.building_hint": "实时更新中...",
+ "graph.simulating_hint": "GraphRAG长短期记忆实时更新中",
+ "graph.finished_hint": "还有少量内容处理中,建议稍后手动刷新图谱",
+ "graph.close_hint": "关闭提示",
+ "graph.node_details": "Node Details",
+ "graph.relationship": "Relationship",
+
+ "history.title": "推演记录",
+ "history.graph_build": "图谱构建",
+ "history.env_setup": "环境搭建",
+ "history.report": "分析报告",
+ "history.more_files": "+{count} 个文件",
+ "history.no_files": "暂无文件",
+ "history.loading": "加载中...",
+ "history.modal_requirement": "模拟需求",
+ "history.modal_none": "无",
+ "history.modal_files": "关联文件",
+ "history.modal_no_files": "暂无关联文件",
+ "history.modal_divider": "推演回放",
+ "history.modal_step1": "图谱构建",
+ "history.modal_step2": "环境搭建",
+ "history.modal_step4": "分析报告",
+ "history.playback_hint": "Step3「开始模拟」与 Step5「深度互动」需在运行中启动,不支持历史回放"
+}