87 changes: 78 additions & 9 deletions dashboard/server.py
@@ -38,6 +38,7 @@
DIST = BASE / 'dist'  # React build output (npm run build)
DATA = BASE.parent / "data"
SCRIPTS = BASE.parent / 'scripts'
_ACTIVE_TASK_DATA_DIR = None

# Static asset MIME types
_MIME_TYPES = {
@@ -82,21 +83,84 @@ def now_iso():
    return datetime.datetime.now(datetime.timezone.utc).isoformat().replace('+00:00', 'Z')


def load_tasks():
    return atomic_json_read(DATA / 'tasks_source.json', [])
def _iter_task_data_dirs():
    """Return candidate task data directories (prefer workspace, then local data)."""
Copilot AI Mar 11, 2026

The docstring of _iter_task_data_dirs() says "prefer workspace, then local data", but the implementation builds dirs = [DATA] first and only then appends the workspace candidates; when scores tie (or when falling back because no tasks_source exists), the local DATA wins, which contradicts the docstring. Either reorder the candidates / add an explicit tie-breaker, or fix the docstring to match the actual strategy.

Suggested change
"""返回可用的任务数据目录候选(优先 workspace,其次本地 data)。"""
"""返回可用的任务数据目录候选(优先本地 data,其次 workspace)。"""

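If workspace-first really is the intent, a minimal sketch of the reorder option (a sketch only, reusing this diff's names plus the OCLAW_HOME constant the next comment suggests; the strict > comparison in get_task_data_dir() then lets workspace candidates win ties):

def _iter_task_data_dirs():
    """Return candidate task data directories (prefer workspace, then local data)."""
    dirs = []
    # Workspace data dirs first, so they win ties under the strict > comparison.
    for p in sorted(OCLAW_HOME.glob('workspace-*/data')):
        if p.is_dir():
            dirs.append(p)
    dirs.append(DATA)  # local repo data as the last-resort candidate
    return dirs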
    dirs = [DATA]
    oclaw_home = pathlib.Path.home() / '.openclaw'
    for p in sorted(oclaw_home.glob('workspace-*/data')):
Comment on lines +89 to +90
Copilot AI Mar 11, 2026

This recomputes oclaw_home = pathlib.Path.home() / '.openclaw', even though the top of the file already defines a reusable OCLAW_HOME constant that is easy to patch in tests. Use OCLAW_HOME directly, to avoid behavior diverging whenever tests or deployments need to redirect the OpenClaw directory.

Suggested change
    oclaw_home = pathlib.Path.home() / '.openclaw'
    for p in sorted(oclaw_home.glob('workspace-*/data')):
    for p in sorted(OCLAW_HOME.glob('workspace-*/data')):

        if p.is_dir():
            dirs.append(p)
    return dirs


def save_tasks(tasks):
    atomic_json_write(DATA / 'tasks_source.json', tasks)
    # Trigger refresh (async, non-blocking, avoids zombie processes)
def _task_source_score(task_file: pathlib.Path):
    """Score a task source: prefer non-demo tasks, then task count, then file mtime."""
    try:
        tasks = atomic_json_read(task_file, [])
    except Exception:
        tasks = []
    if not isinstance(tasks, list):
        tasks = []
    non_demo = 0
    for t in tasks:
        tid = str((t or {}).get('id', ''))
        if tid and not tid.startswith('JJC-DEMO'):
            non_demo += 1
    try:
        mtime = task_file.stat().st_mtime
    except Exception:
        mtime = 0
    return (1 if non_demo > 0 else 0, non_demo, len(tasks), mtime)


def get_task_data_dir():
    """Auto-select the current task data directory; cache the result so it stays stable for the lifetime of the server."""
    global _ACTIVE_TASK_DATA_DIR
    if _ACTIVE_TASK_DATA_DIR and _ACTIVE_TASK_DATA_DIR.is_dir():
        return _ACTIVE_TASK_DATA_DIR

    best_dir = DATA
    best_score = (-1, -1, -1, -1)
    for d in _iter_task_data_dirs():
        tf = d / 'tasks_source.json'
Comment on lines +116 to +125
Copilot AI Mar 11, 2026

The new data-directory auto-detection reads the real user directory ~/.openclaw/workspace-*/data, so the existing tests/test_server.py can no longer fully isolate the environment by patching srv.DATA (a tasks_source.json in a local workspace may "steal" the data source), coupling the tests to the local machine. Add or adjust test coverage: create a tasks_source.json in the test and verify that get_task_data_dir()'s selection logic is controllable (or provide an injectable/disableable switch for workspace detection).

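A sketch of what such a test could look like, assuming pytest's tmp_path and monkeypatch fixtures and the import alias srv already used in tests/test_server.py; the _ACTIVE_TASK_DATA_DIR reset mirrors the cache this PR adds:

import json

def test_get_task_data_dir_is_isolated(tmp_path, monkeypatch):
    data_dir = tmp_path / 'data'
    data_dir.mkdir()
    (data_dir / 'tasks_source.json').write_text(json.dumps([{'id': 'T-1'}]))
    # Point the module at the isolated dir and disable workspace probing.
    monkeypatch.setattr(srv, 'DATA', data_dir)
    monkeypatch.setattr(srv, '_iter_task_data_dirs', lambda: [data_dir])
    monkeypatch.setattr(srv, '_ACTIVE_TASK_DATA_DIR', None)  # drop any cached choice
    assert srv.get_task_data_dir() == data_dir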
        if not tf.exists():
            continue
        score = _task_source_score(tf)
        if score > best_score:
            best_score = score
            best_dir = d

Comment on lines +119 to +132
Copilot AI Mar 11, 2026

Once get_task_data_dir() caches _ACTIVE_TASK_DATA_DIR, it only checks is_dir() before returning it; if no candidate contains tasks_source.json at startup (or one only appears in a workspace later), the cache locks onto the default DATA forever, so multi-workspace setups can still read the wrong data source. Require that the directory contains tasks_source.json before caching, or validate on a cache hit that the file still exists and re-probe when necessary.

Suggested change
    if _ACTIVE_TASK_DATA_DIR and _ACTIVE_TASK_DATA_DIR.is_dir():
        return _ACTIVE_TASK_DATA_DIR
    best_dir = DATA
    best_score = (-1, -1, -1, -1)
    for d in _iter_task_data_dirs():
        tf = d / 'tasks_source.json'
        if not tf.exists():
            continue
        score = _task_source_score(tf)
        if score > best_score:
            best_score = score
            best_dir = d
    # On a cache hit, require both that the directory exists and that it still contains tasks_source.json
    if _ACTIVE_TASK_DATA_DIR and _ACTIVE_TASK_DATA_DIR.is_dir():
        if (_ACTIVE_TASK_DATA_DIR / 'tasks_source.json').exists():
            return _ACTIVE_TASK_DATA_DIR
        # If the cached directory no longer contains the task source file, drop the cache and re-probe
        _ACTIVE_TASK_DATA_DIR = None
    best_dir = DATA
    best_score = None
    for d in _iter_task_data_dirs():
        tf = d / 'tasks_source.json'
        if not tf.exists():
            continue
        score = _task_source_score(tf)
        if best_score is None or score > best_score:
            best_score = score
            best_dir = d
    # Cache only when at least one directory containing tasks_source.json was found;
    # otherwise return the default DATA without caching, so a workspace created later can be re-probed.
    if best_score is None:
        log.info('Task data source: default DATA (no tasks_source.json found)')
        return best_dir

    _ACTIVE_TASK_DATA_DIR = best_dir
    log.info(f'Task data source: {_ACTIVE_TASK_DATA_DIR}')
    return _ACTIVE_TASK_DATA_DIR


def _refresh_live_data_async(task_data_dir: pathlib.Path):
    """Trigger the live_status refresh script for the given data directory."""
    script = task_data_dir.parent / 'scripts' / 'refresh_live_data.py'
    if not script.exists():
        script = SCRIPTS / 'refresh_live_data.py'

    def _refresh():
        try:
            subprocess.run(['python3', str(SCRIPTS / 'refresh_live_data.py')], timeout=30)
            subprocess.run(['python3', str(script)], timeout=30)
Comment on lines +138 to +146
Copilot AI Mar 11, 2026

When _refresh_live_data_async() cannot find the script under the workspace directory, it falls back to the repo's scripts/refresh_live_data.py; but that script hardcodes DATA = BASE / 'data' relative to its own location (see scripts/refresh_live_data.py), so it writes live_status.json back to the repo's data directory instead of the selected task_data_dir. The dashboard can then still miss the latest live_status after save_tasks. Let the refresh script accept a dataDir (argument or environment variable), or on fallback explicitly pass/switch to an implementation that can write to task_data_dir.

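One possible shape for the environment-variable option, reusing the closure variables from this diff; the OCLAW_DATA_DIR name is hypothetical, and scripts/refresh_live_data.py would need the matching one-line change shown at the end:

    def _refresh():
        try:
            # Hypothetical override so the script writes into the selected dir.
            env = dict(os.environ, OCLAW_DATA_DIR=str(task_data_dir))
            subprocess.run(['python3', str(script)], timeout=30, env=env)
        except Exception as e:
            log.warning(f'refresh_live_data.py trigger failed: {e}')

# In scripts/refresh_live_data.py:
# DATA = pathlib.Path(os.environ.get('OCLAW_DATA_DIR', BASE / 'data'))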
        except Exception as e:
            log.warning(f'refresh_live_data.py trigger failed: {e}')

    threading.Thread(target=_refresh, daemon=True).start()


def load_tasks():
    task_data_dir = get_task_data_dir()
    return atomic_json_read(task_data_dir / 'tasks_source.json', [])


def save_tasks(tasks):
    task_data_dir = get_task_data_dir()
    atomic_json_write(task_data_dir / 'tasks_source.json', tasks)
    _refresh_live_data_async(task_data_dir)


def handle_task_action(task_id, action, reason):
    """Stop/cancel/resume a task from the dashboard."""
    tasks = load_tasks()
@@ -2124,12 +2188,17 @@ def do_GET(self):
        if p in ('', '/dashboard', '/dashboard.html'):
            self.send_file(DIST / 'index.html')
        elif p == '/healthz':
            checks = {'dataDir': DATA.is_dir(), 'tasksReadable': (DATA / 'tasks_source.json').exists()}
            checks['dataWritable'] = os.access(str(DATA), os.W_OK)
            task_data_dir = get_task_data_dir()
            checks = {
                'dataDir': task_data_dir.is_dir(),
                'tasksReadable': (task_data_dir / 'tasks_source.json').exists(),
            }
            checks['dataWritable'] = os.access(str(task_data_dir), os.W_OK)
            all_ok = all(checks.values())
            self.send_json({'status': 'ok' if all_ok else 'degraded', 'ts': now_iso(), 'checks': checks})
        elif p == '/api/live-status':
            self.send_json(read_json(DATA / 'live_status.json'))
            task_data_dir = get_task_data_dir()
            self.send_json(read_json(task_data_dir / 'live_status.json'))
        elif p == '/api/agent-config':
            self.send_json(read_json(DATA / 'agent_config.json'))
        elif p == '/api/model-change-log':