diff --git a/Cargo.toml b/Cargo.toml index cd72b701..32f6902c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -27,6 +27,7 @@ members = [ "engine/crates/fx-improve", "engine/crates/fx-transactions", "engine/crates/fx-scratchpad", + "engine/crates/fx-preprocess", "engine/crates/fawx-ripcord", "engine/crates/fx-ripcord", diff --git a/ENGINEERING.md b/ENGINEERING.md index cd0a495a..7528c415 100644 --- a/ENGINEERING.md +++ b/ENGINEERING.md @@ -203,11 +203,42 @@ feature/* → dev → staging → main - **feature branches**: cut from `dev`, PRs target `dev` - **dev**: integration branch — merge freely after CI + TUI smoke test pass. Multiple features tested together here. -- **staging**: release candidate — maintainer promotes `dev → staging` after integration testing passes +- **staging**: release candidate — Joe manually promotes `dev → staging` after integration testing passes - **main**: production releases only — `staging → main` for releases All three long-lived branches are protected: no force push, no deletion. --- -*This file defines the engineering standards for the Fawx codebase. All contributions are held to these rules. For style preferences, see `TASTE.md`.* +## 7. Agent Execution Model + +### Roles +- **Clawdio main session** is the lead. Orchestrates, designs, reviews results, makes architectural calls. Does NOT write code, regardless of size. All code is delegated to subagents. +- **Subagents** do all implementation, review, and fix work. + +### Model policy +- **Implementers + Fixers** (code generation): `model: "openai-codex/gpt-5.4"`, `thinking: "xhigh"`. +- **Reviewers** (code analysis): `model: "anthropic/claude-opus-4-6"`, `thinking: "adaptive"`. +- GPT-5.4 xhigh for writing code, Opus adaptive for judging code. No Sonnet unless Joe explicitly requests it. +- Always use full model paths — never aliases (can silently fall back to wrong provider). 
+ +### Orchestration model +- **Main session owns the state machine.** Clawdio directly manages implement → review → fix → re-review loops. Do not delegate lifecycle management to N+1 orchestrator subagents. +- **Subagents get single-responsibility prompts.** One job each: "implement this spec," "review this diff," "fix these findings." +- **Spec-driven implementation.** Implementers receive a written spec file, not prose descriptions. + +### Concurrency +- **Simple** (< 50 lines): Direct Codex worker + Opus review. Parallel OK. +- **Standard** (single-PR features): Main session spawns workers directly. Parallel PRs OK (max 2-3) if no file overlap. +- **Complex** (multi-crate, architectural): **Sequential only — one PR at a time.** Main session manages full context. + +### Rules +1. Main session NEVER writes code. All code work delegated to subagents, no exceptions. +2. Implementers + Fixers use `openai-codex/gpt-5.4` with `thinking: "xhigh"`. Reviewers use `anthropic/claude-opus-4-6` with `thinking: "adaptive"`. +3. Main session chains stages (implement → review → fix → re-review) directly — no N+1 orchestrator layer. +4. All review findings (blocking, non-blocking, nice-to-have) must be fixed. Fresh reviewer for R2. +5. Every subagent prompt includes ENGINEERING.md rules and the spec file path. + +--- + +*This file is immutable doctrine. Cite it in PR reviews. Changes require explicit user approval. 
For evolving preferences and style, see `TASTE.md`.* diff --git a/engine/crates/fx-api/src/bundle.rs b/engine/crates/fx-api/src/bundle.rs index 2459a9e9..ad6979d0 100644 --- a/engine/crates/fx-api/src/bundle.rs +++ b/engine/crates/fx-api/src/bundle.rs @@ -60,9 +60,9 @@ mod tests { #[test] fn find_bundle_root_finds_nested_app() { - let path = Path::new("/Applications/Fawx.app/Contents/MacOS/fawx-server"); + let path = Path::new("/Users/joe/Desktop/Fawx.app/Contents/MacOS/fawx-server"); let root = find_bundle_root(path); - assert_eq!(root, Some(PathBuf::from("/Applications/Fawx.app"))); + assert_eq!(root, Some(PathBuf::from("/Users/joe/Desktop/Fawx.app"))); } #[test] diff --git a/engine/crates/fx-api/src/devices.rs b/engine/crates/fx-api/src/devices.rs index 2d9d960c..13d58654 100644 --- a/engine/crates/fx-api/src/devices.rs +++ b/engine/crates/fx-api/src/devices.rs @@ -220,7 +220,7 @@ mod tests { #[test] fn create_device_returns_hashed_token() { let mut store = DeviceStore::new(); - let (raw_token, device) = store.create_device("Example MacBook"); + let (raw_token, device) = store.create_device("Joe's MacBook"); assert!(raw_token.starts_with(DEVICE_TOKEN_PREFIX)); assert_eq!( @@ -235,18 +235,18 @@ mod tests { #[test] fn list_device_info_excludes_token_hash() { let mut store = DeviceStore::new(); - let _ = store.create_device("Example MacBook"); + let _ = store.create_device("Joe's MacBook"); let json = serde_json::to_value(store.list_device_info()).expect("serialize device info"); assert!(json[0].get("token_hash").is_none()); - assert_eq!(json[0]["device_name"], "Example MacBook"); + assert_eq!(json[0]["device_name"], "Joe's MacBook"); } #[test] fn authenticate_works() { let mut store = DeviceStore::new(); - let (raw_token, device) = store.create_device("Example MacBook"); + let (raw_token, device) = store.create_device("Joe's MacBook"); store.list_devices_mut()[0].last_used_at = 0; assert_eq!(store.authenticate(&raw_token), Some(device.id)); @@ -257,7 +257,7 @@ mod 
tests { #[test] fn revoke_invalidates_device() { let mut store = DeviceStore::new(); - let (raw_token, device) = store.create_device("Example MacBook"); + let (raw_token, device) = store.create_device("Joe's MacBook"); assert_eq!(store.revoke(&device.id), Some(device.clone())); assert!(store.revoke(&device.id).is_none()); @@ -269,7 +269,7 @@ mod tests { let temp = tempdir().expect("tempdir"); let path = temp.path().join("devices.json"); let mut store = DeviceStore::new(); - let (raw_token, _) = store.create_device("Example MacBook"); + let (raw_token, _) = store.create_device("Joe's MacBook"); store.save(&path).expect("save device store"); let mut loaded = DeviceStore::load(&path); @@ -286,7 +286,7 @@ mod tests { let temp = tempdir().expect("tempdir"); let path = temp.path().join("devices.json"); let mut store = DeviceStore::new(); - let _ = store.create_device("Example MacBook"); + let _ = store.create_device("Joe's MacBook"); store.save(&path).expect("save device store"); let mode = fs::metadata(&path).expect("metadata").permissions().mode() & 0o777; @@ -302,7 +302,7 @@ mod tests { devices: vec![DeviceToken { id: "dev-123".to_string(), token_hash: "hash".to_string(), - device_name: "Example MacBook".to_string(), + device_name: "Joe's MacBook".to_string(), created_at: 1_700_000_000_000, last_used_at: 1_700_000_005_000, }], diff --git a/engine/crates/fx-api/src/handlers/fleet.rs b/engine/crates/fx-api/src/handlers/fleet.rs index e0a8bfed..31a54214 100644 --- a/engine/crates/fx-api/src/handlers/fleet.rs +++ b/engine/crates/fx-api/src/handlers/fleet.rs @@ -157,7 +157,7 @@ mod tests { let temp_dir = tempfile::TempDir::new().expect("tempdir should create"); let mut manager = FleetManager::init(temp_dir.path()).expect("fleet should initialize"); let token = manager - .add_node("node-a", "203.0.113.10", 8400) + .add_node("macmini", "198.51.100.19", 8400) .expect("node should add"); TestFleet { _temp_dir: temp_dir, @@ -168,7 +168,7 @@ mod tests { fn 
registration_request(token: &str) -> FleetRegistrationRequest { FleetRegistrationRequest { - node_name: "node-a".to_string(), + node_name: "macmini".to_string(), bearer_token: token.to_string(), capabilities: vec!["agentic_loop".to_string(), "macos-aarch64".to_string()], rust_version: Some("1.85.0".to_string()), diff --git a/engine/crates/fx-api/src/handlers/fleet_dashboard.rs b/engine/crates/fx-api/src/handlers/fleet_dashboard.rs index 10252c53..94e9d1d5 100644 --- a/engine/crates/fx-api/src/handlers/fleet_dashboard.rs +++ b/engine/crates/fx-api/src/handlers/fleet_dashboard.rs @@ -360,7 +360,7 @@ mod tests { fn node_dto_serializes() { let response = FleetNodeDto { id: "node-1".to_string(), - name: "Worker Node A".to_string(), + name: "Mac Mini".to_string(), status: "healthy".to_string(), last_seen_at: 1_742_000_100, active_tasks: 0, @@ -373,7 +373,7 @@ mod tests { json, json!({ "id": "node-1", - "name": "Worker Node A", + "name": "Mac Mini", "status": "healthy", "last_seen_at": 1_742_000_100, "active_tasks": 0, @@ -433,7 +433,7 @@ mod tests { fn effective_status_marks_old_busy_nodes_degraded() { let node = NodeInfo { node_id: "node-1".to_string(), - name: "Worker Node A".to_string(), + name: "Mac Mini".to_string(), endpoint: "https://127.0.0.1:8400".to_string(), auth_token: None, capabilities: vec![NodeCapability::AgenticLoop], diff --git a/engine/crates/fx-api/src/handlers/git.rs b/engine/crates/fx-api/src/handlers/git.rs index 5b4da36b..7860cc39 100644 --- a/engine/crates/fx-api/src/handlers/git.rs +++ b/engine/crates/fx-api/src/handlers/git.rs @@ -725,7 +725,7 @@ mod tests { hash: "abcdef123456".to_string(), short_hash: "abcdef1".to_string(), message: "feat: add git api".to_string(), - author: "Example Author".to_string(), + author: "Joe".to_string(), timestamp: "2026-03-15T20:00:00Z".to_string(), }], }; @@ -733,7 +733,7 @@ mod tests { let json = serde_json::to_value(response).unwrap(); assert_eq!(json["commits"][0]["hash"], "abcdef123456"); - 
assert_eq!(json["commits"][0]["author"], "Example Author"); + assert_eq!(json["commits"][0]["author"], "Joe"); } #[test] @@ -779,14 +779,14 @@ mod tests { #[test] fn parse_log_line() { let commit = super::parse_log_line( - "abcdef123456|abcdef1|feat: support pipes | in messages|Example Author|2026-03-15T20:00:00Z", + "abcdef123456|abcdef1|feat: support pipes | in messages|Joe|2026-03-15T20:00:00Z", ) .unwrap(); assert_eq!(commit.hash, "abcdef123456"); assert_eq!(commit.short_hash, "abcdef1"); assert_eq!(commit.message, "feat: support pipes | in messages"); - assert_eq!(commit.author, "Example Author"); + assert_eq!(commit.author, "Joe"); } #[test] diff --git a/engine/crates/fx-api/src/handlers/pairing.rs b/engine/crates/fx-api/src/handlers/pairing.rs index b7e21bca..857ddc73 100644 --- a/engine/crates/fx-api/src/handlers/pairing.rs +++ b/engine/crates/fx-api/src/handlers/pairing.rs @@ -396,14 +396,14 @@ mod phase4_tests { let response = qr_pairing_response( &test_runtime(false), &QrTailscaleStatus { - hostname: Some("node.example.ts.net".to_string()), + hostname: Some("myhost.example.com".to_string()), cert_ready: true, }, ); - assert_eq!(response.display_host, "node.example.ts.net"); + assert_eq!(response.display_host, "myhost.example.com"); assert_eq!(response.transport, "tailscale_https"); assert!(!response.same_network_only); - assert!(response.scheme_url.contains("host=node.example.ts.net")); + assert!(response.scheme_url.contains("host=myhost.example.com")); } #[test] diff --git a/engine/crates/fx-api/src/handlers/phase4.rs b/engine/crates/fx-api/src/handlers/phase4.rs index 87dcfb1d..0b2b4c5d 100644 --- a/engine/crates/fx-api/src/handlers/phase4.rs +++ b/engine/crates/fx-api/src/handlers/phase4.rs @@ -256,7 +256,7 @@ mod tests { installed: true, running: true, logged_in: true, - hostname: Some("node.example.ts.net".to_string()), + hostname: Some("myhost.example.com".to_string()), cert_ready: true, }, }; @@ -268,7 +268,7 @@ mod tests { 
assert_eq!(json["launchagent"]["loaded"], true); assert_eq!(json["local_server"]["port"], 8400); assert_eq!(json["auth"]["providers_configured"][0], "anthropic"); - assert_eq!(json["tailscale"]["hostname"], "node.example.ts.net"); + assert_eq!(json["tailscale"]["hostname"], "myhost.example.com"); } #[test] diff --git a/engine/crates/fx-api/src/tailscale.rs b/engine/crates/fx-api/src/tailscale.rs index dd1f143c..d6ed1a79 100644 --- a/engine/crates/fx-api/src/tailscale.rs +++ b/engine/crates/fx-api/src/tailscale.rs @@ -108,11 +108,11 @@ mod tests { #[test] fn parse_tailscale_cli_output_returns_cgnat_ip() { - let stdout = b"100.64.0.42\n"; + let stdout = b"100.100.100.1\n"; assert_eq!( parse_tailscale_cli_output(stdout), - Some(IpAddr::V4(Ipv4Addr::new(100, 64, 0, 42))) + Some(IpAddr::V4(Ipv4Addr::new(100, 100, 100, 1))) ); } @@ -132,17 +132,17 @@ mod tests { #[test] fn parse_macos_ifconfig_line_extracts_cgnat_ip() { - let line = "inet 100.64.0.43 --> 100.64.0.43 netmask 0xffffffff"; + let line = "inet 100.101.20.63 --> 100.101.20.63 netmask 0xffffffff"; assert_eq!( extract_ip_from_line(line), - Some(IpAddr::V4(Ipv4Addr::new(100, 64, 0, 43))) + Some(IpAddr::V4(Ipv4Addr::new(100, 101, 20, 63))) ); } #[test] fn parse_macos_ifconfig_line_without_inet_prefix_returns_none() { - let line = "100.64.0.43 --> 100.64.0.43 netmask 0xffffffff"; + let line = "198.51.100.63 --> 198.51.100.63 netmask 0xffffffff"; assert_eq!(extract_ip_from_line(line), None); } @@ -157,11 +157,12 @@ mod tests { #[test] fn linux_ip_output_still_parsed_correctly() { - let text = "7: tailscale0 inet 100.64.0.42/32 brd 100.64.0.42 scope global tailscale0"; + let text = + "7: tailscale0 inet 100.100.100.1/32 brd 100.100.100.1 scope global tailscale0"; assert_eq!( find_cgnat_ip(text), - Some(IpAddr::V4(Ipv4Addr::new(100, 64, 0, 42))) + Some(IpAddr::V4(Ipv4Addr::new(100, 100, 100, 1))) ); } } diff --git a/engine/crates/fx-api/src/tests.rs b/engine/crates/fx-api/src/tests.rs index 81233894..d2a4914c 100644 --- 
a/engine/crates/fx-api/src/tests.rs +++ b/engine/crates/fx-api/src/tests.rs @@ -380,7 +380,7 @@ async fn mock_status() -> Json { model: "test-model".to_string(), skills: vec!["skill-a".to_string()], memory_entries: 10, - tailscale_ip: Some("100.64.0.30".to_string()), + tailscale_ip: Some("192.0.2.10".to_string()), config: None, }) } @@ -425,7 +425,7 @@ fn tailscale_ip_accepts_valid_range() { Ipv4Addr::new(100, 127, 255, 255) ))); assert!(crate::tailscale::is_tailscale_ip(&IpAddr::V4( - Ipv4Addr::new(100, 64, 0, 42) + Ipv4Addr::new(100, 100, 100, 2) ))); } @@ -450,14 +450,14 @@ fn tailscale_ip_rejects_ipv6() { #[test] fn listen_targets_bind_localhost_and_tailscale() { - let plan = listen_targets(8400, Some(IpAddr::V4(Ipv4Addr::new(100, 64, 0, 42)))); + let plan = listen_targets(8400, Some(IpAddr::V4(Ipv4Addr::new(100, 100, 100, 2)))); let tailscale = plan.tailscale.expect("tailscale target"); assert_eq!(plan.local.addr, SocketAddr::from(([127, 0, 0, 1], 8400))); assert_eq!(plan.local.label, "local"); assert_eq!( tailscale.addr, - SocketAddr::new(IpAddr::V4(Ipv4Addr::new(100, 64, 0, 42)), 8400) + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(100, 100, 100, 2)), 8400) ); assert_eq!(tailscale.label, "Tailscale"); } @@ -515,7 +515,7 @@ fn startup_target_lines_use_https_for_tailscale_when_enabled() { label: "local", }, Some(ListenTarget { - addr: SocketAddr::from(([100, 64, 0, 42], 8400)), + addr: SocketAddr::from(([192, 0, 2, 1], 8400)), label: "Tailscale", }), true, @@ -523,7 +523,7 @@ fn startup_target_lines_use_https_for_tailscale_when_enabled() { assert_eq!(lines[0], "Fawx API listening on:"); assert_eq!(lines[1], " http://127.0.0.1:8400 (local)"); - assert_eq!(lines[2], " https://100.64.0.42:8400 (Tailscale)"); + assert_eq!(lines[2], " https://192.0.2.1:8400 (Tailscale)"); } #[test] @@ -534,14 +534,14 @@ fn startup_target_lines_use_http_for_tailscale_when_tls_disabled() { label: "local", }, Some(ListenTarget { - addr: SocketAddr::from(([100, 64, 0, 42], 8400)), + addr: 
SocketAddr::from(([192, 0, 2, 1], 8400)), label: "Tailscale", }), false, ); assert_eq!(lines[0], "Fawx HTTP API listening on:"); - assert_eq!(lines[2], " http://100.64.0.42:8400 (Tailscale)"); + assert_eq!(lines[2], " http://192.0.2.1:8400 (Tailscale)"); } #[tokio::test] @@ -555,7 +555,7 @@ async fn tailscale_bind_failure_falls_back_to_localhost_server() { .expect("bind localhost"); let local_addr = local_listener.local_addr().expect("local addr"); let tailscale_target = ListenTarget { - addr: SocketAddr::from(([100, 64, 0, 42], 8400)), + addr: SocketAddr::from(([192, 0, 2, 1], 8400)), label: "Tailscale", }; let listeners = BoundListeners { @@ -615,9 +615,9 @@ async fn wait_for_server_pair_shuts_down_peer_when_one_server_exits() { #[test] fn extract_ip_parses_ip_addr_output() { - let line = "4: tailscale0 inet 100.64.0.42/32 scope global tailscale0"; + let line = "4: tailscale0 inet 100.100.100.2/32 scope global tailscale0"; let ip = crate::tailscale::extract_ip_from_line(line); - assert_eq!(ip, Some(IpAddr::V4(Ipv4Addr::new(100, 64, 0, 42)))); + assert_eq!(ip, Some(IpAddr::V4(Ipv4Addr::new(100, 100, 100, 2)))); } #[test] @@ -715,13 +715,13 @@ fn status_response_has_expected_fields() { model: "claude-3".to_string(), skills: vec!["read_file".to_string()], memory_entries: 42, - tailscale_ip: Some("100.64.0.20".to_string()), + tailscale_ip: Some("192.0.2.1".to_string()), config: None, }; let json: serde_json::Value = serde_json::from_str(&serde_json::to_string(&response).expect("serialize")).expect("parse"); assert_eq!(json["status"], "ok"); - assert_eq!(json["tailscale_ip"], "100.64.0.20"); + assert_eq!(json["tailscale_ip"], "192.0.2.1"); assert_eq!(json["memory_entries"], 42); assert!(json["skills"].is_array()); } @@ -805,7 +805,7 @@ async fn status_endpoint_returns_ok() { let body = resp.into_body().collect().await.expect("body").to_bytes(); let json: serde_json::Value = serde_json::from_slice(&body).expect("json"); assert_eq!(json["status"], "ok"); - 
assert_eq!(json["tailscale_ip"], "100.64.0.30"); + assert_eq!(json["tailscale_ip"], "192.0.2.10"); assert!(json["skills"].is_array()); } @@ -1976,8 +1976,8 @@ mod routing_and_status { #[tokio::test] async fn get_devices_returns_device_list() { let mut devices = DeviceStore::new(); - let (_, first) = devices.create_device("Example MacBook"); - let (_, second) = devices.create_device("Example iPhone"); + let (_, first) = devices.create_device("Joe's MacBook"); + let (_, second) = devices.create_device("Joe's iPhone"); let app = build_router(test_state_with_devices(devices), None); let response = app @@ -1996,7 +1996,7 @@ mod routing_and_status { #[tokio::test] async fn get_devices_excludes_token_hash() { let mut devices = DeviceStore::new(); - let _ = devices.create_device("Example MacBook"); + let _ = devices.create_device("Joe's MacBook"); let app = build_router(test_state_with_devices(devices), None); let response = app @@ -2011,7 +2011,7 @@ mod routing_and_status { #[tokio::test] async fn delete_device_revokes_token() { let mut devices = DeviceStore::new(); - let (raw_token, device) = devices.create_device("Example MacBook"); + let (raw_token, device) = devices.create_device("Joe's MacBook"); let app = build_router(test_state_with_devices(devices), None); let before_delete = Request::builder() @@ -2443,7 +2443,7 @@ allowed_chat_ids = [123] let temp = TempDir::new().expect("tempdir"); let mut manager = FleetManager::init(temp.path()).expect("fleet init"); let token = manager - .add_node("node-a", "203.0.113.10", 8400) + .add_node("macmini", "198.51.100.19", 8400) .expect("node should add"); let app = build_router( test_state(None, Vec::new()), @@ -2455,7 +2455,7 @@ allowed_chat_ids = [123] .header("content-type", "application/json") .body(Body::from( serde_json::to_vec(&fx_fleet::FleetRegistrationRequest { - node_name: "node-a".to_string(), + node_name: "macmini".to_string(), bearer_token: token.secret, capabilities: vec!["agentic_loop".to_string()], rust_version: 
None, diff --git a/engine/crates/fx-channel-telegram/src/lib.rs b/engine/crates/fx-channel-telegram/src/lib.rs index 33e39261..b2c3633e 100644 --- a/engine/crates/fx-channel-telegram/src/lib.rs +++ b/engine/crates/fx-channel-telegram/src/lib.rs @@ -1043,7 +1043,7 @@ mod tests { "message": {{ "message_id": 42, "chat": {{ "id": {chat_id} }}, - "from": {{ "first_name": "Example" }}, + "from": {{ "first_name": "Joe" }}, "text": "{text}" }} }}"# @@ -1065,7 +1065,7 @@ mod tests { assert_eq!(result.chat_id, 12345); assert_eq!(result.text, "hello bot"); assert_eq!(result.message_id, 42); - assert_eq!(result.from_name.as_deref(), Some("Example")); + assert_eq!(result.from_name.as_deref(), Some("Joe")); } #[test] @@ -1164,7 +1164,7 @@ mod tests { "message": { "message_id": 44, "chat": { "id": 12345 }, - "from": { "first_name": "Example" }, + "from": { "first_name": "Joe" }, "photo": [ {"file_id": "thumb", "width": 90, "height": 90}, {"file_id": "medium", "width": 320, "height": 240}, diff --git a/engine/crates/fx-cli/src/commands/devices.rs b/engine/crates/fx-cli/src/commands/devices.rs index f236adb2..5bcbc56c 100644 --- a/engine/crates/fx-cli/src/commands/devices.rs +++ b/engine/crates/fx-cli/src/commands/devices.rs @@ -277,7 +277,7 @@ mod tests { let response = DevicesResponse { devices: vec![DeviceInfo { id: "dev-a1b2c3".to_string(), - device_name: "Example MacBook".to_string(), + device_name: "Joe's MacBook".to_string(), created_at: 1_773_400_000, last_used_at: 1_773_435_000, }], @@ -289,7 +289,7 @@ mod tests { .expect("device JSON should parse"); assert_eq!(json["devices"][0]["id"], "dev-a1b2c3"); - assert_eq!(json["devices"][0]["device_name"], "Example MacBook"); + assert_eq!(json["devices"][0]["device_name"], "Joe's MacBook"); assert_eq!(json["devices"][0]["created_at"], 1_773_400_000); assert_eq!(json["devices"][0]["last_used_at"], 1_773_435_000); } @@ -299,7 +299,7 @@ mod tests { let response = DevicesResponse { devices: vec![DeviceInfo { id: 
"dev-a1b2c3".to_string(), - device_name: "Example MacBook".to_string(), + device_name: "Joe's MacBook".to_string(), created_at: 1_700_000_000, last_used_at: 1_700_000_300, }], diff --git a/engine/crates/fx-cli/src/commands/fleet.rs b/engine/crates/fx-cli/src/commands/fleet.rs index 6a6b70df..1c33d1fe 100644 --- a/engine/crates/fx-cli/src/commands/fleet.rs +++ b/engine/crates/fx-cli/src/commands/fleet.rs @@ -24,7 +24,7 @@ pub enum FleetCommands { Init, /// Add a worker node to the fleet Add { - /// Node name (e.g., "node-a") + /// Node name (e.g., "macmini") name: String, /// Tailscale IP address #[arg(long)] @@ -35,7 +35,7 @@ pub enum FleetCommands { }, /// Join a fleet as a worker node Join { - /// Primary node endpoint (e.g., 203.0.113.20:8400) + /// Primary node endpoint (e.g., 192.0.2.1:8400) primary: String, /// Bearer token from `fawx fleet add` #[arg(long)] @@ -505,7 +505,7 @@ mod tests { #[test] fn parsed_hostname_trims_trailing_newline() { - assert_eq!(parsed_hostname(b"node-a\n"), Some("node-a".to_string())); + assert_eq!(parsed_hostname(b"macmini\n"), Some("macmini".to_string())); } #[test] @@ -549,8 +549,8 @@ mod tests { let mut output = Vec::new(); execute_fleet_command( &FleetCommands::Add { - name: "node-a".to_string(), - ip: "203.0.113.10".to_string(), + name: "macmini".to_string(), + ip: "198.51.100.19".to_string(), port: 8400, }, &fleet_dir, @@ -563,11 +563,11 @@ mod tests { let tokens = read_tokens(&fleet_dir); let token = tokens.first().expect("token should exist"); - assert!(output.contains("✓ Node \"node-a\" registered")); + assert!(output.contains("✓ Node \"macmini\" registered")); assert!(output.contains("✓ Token generated")); assert!(output.contains("Join command (run on the worker):")); assert!(output.contains(&format!( - "fawx fleet join 203.0.113.10:8400 --token {}", + "fawx fleet join 198.51.100.19:8400 --token {}", token.secret ))); } @@ -583,8 +583,8 @@ mod tests { let mut first_output = Vec::new(); execute_fleet_command( 
&FleetCommands::Add { - name: "node-a".to_string(), - ip: "203.0.113.10".to_string(), + name: "macmini".to_string(), + ip: "198.51.100.19".to_string(), port: 8400, }, &fleet_dir, @@ -595,8 +595,8 @@ mod tests { let result = execute_fleet_command( &FleetCommands::Add { - name: "node-a".to_string(), - ip: "203.0.113.11".to_string(), + name: "macmini".to_string(), + ip: "198.51.100.20".to_string(), port: 8400, }, &fleet_dir, @@ -612,7 +612,7 @@ mod tests { let mut server = TestRegisterServer::spawn(TestRegisterResponse { status: StatusCode::OK, body: FleetRegistrationResponse { - node_id: "node-a-a1b2c3".to_string(), + node_id: "macmini-a1b2c3".to_string(), accepted: true, message: "registered".to_string(), }, @@ -650,12 +650,12 @@ mod tests { .json .capabilities .contains(&"agentic_loop".to_string())); - assert_eq!(identity.node_id, "node-a-a1b2c3"); + assert_eq!(identity.node_id, "macmini-a1b2c3"); assert_eq!(identity.primary_endpoint, server.base_url); assert_eq!(identity.bearer_token, token); assert!(identity.registered_at_ms > 0); assert!(output.contains("✓ Connected to primary at")); - assert!(output.contains("✓ Registered as node \"node-a-a1b2c3\"")); + assert!(output.contains("✓ Registered as node \"macmini-a1b2c3\"")); assert!(output.contains("✓ Identity saved to")); } @@ -665,13 +665,13 @@ mod tests { let fleet_dir = temp_dir.path().join("fleet"); let mut manager = FleetManager::init(&fleet_dir).expect("fleet should initialize"); let token = manager - .add_node("node-a", "203.0.113.10", 8400) + .add_node("macmini", "198.51.100.19", 8400) .expect("node should add"); let mut output = Vec::new(); execute_fleet_command( &FleetCommands::Remove { - name: "node-a".to_string(), + name: "macmini".to_string(), }, &fleet_dir, &mut output, @@ -682,7 +682,7 @@ mod tests { let reloaded_manager = FleetManager::load(&fleet_dir).expect("fleet should load"); let output = String::from_utf8(output).expect("utf8"); - assert!(output.contains("✓ Node \"node-a\" removed and token 
revoked")); + assert!(output.contains("✓ Node \"macmini\" removed and token revoked")); assert_eq!(reloaded_manager.verify_bearer(&token.secret), None); assert!(reloaded_manager.list_nodes().is_empty()); } @@ -738,16 +738,16 @@ mod tests { let mut manager = FleetManager::load(&fleet_dir).expect("fleet should load"); manager - .add_node("node-a", "203.0.113.10", 8400) + .add_node("macmini", "198.51.100.19", 8400) .expect("first node should add"); manager - .add_node("node-b", "203.0.113.11", 8400) + .add_node("macbook", "198.51.100.20", 8400) .expect("second node should add"); let now_ms = current_time_ms(); let mut nodes = read_nodes(&fleet_dir); for node in &mut nodes { - if node.name == "node-b" { + if node.name == "macbook" { node.status = NodeStatus::Online; node.last_heartbeat_ms = now_ms.saturating_sub(65_000); } @@ -761,10 +761,10 @@ mod tests { let output = String::from_utf8(output).expect("utf8"); assert!(output.contains("Fleet Nodes:")); - assert!(output.contains("node-b")); - assert!(output.contains("node-a")); - assert!(output.contains("203.0.113.10:8400")); - assert!(output.contains("203.0.113.11:8400")); + assert!(output.contains("macbook")); + assert!(output.contains("macmini")); + assert!(output.contains("198.51.100.19:8400")); + assert!(output.contains("198.51.100.20:8400")); assert!(output.contains("online")); assert!(output.contains("offline")); assert!(output.contains("1m ago")); @@ -777,13 +777,13 @@ mod tests { let fleet_dir = temp_dir.path().join("fleet"); let mut manager = FleetManager::init(&fleet_dir).expect("fleet should initialize"); let token = manager - .add_node("node-a", "203.0.113.10", 8400) + .add_node("macmini", "198.51.100.19", 8400) .expect("node should add"); let nodes = manager.list_nodes(); let output = render_list_output(&nodes, current_time_ms()); - assert!(output.contains("node-a")); + assert!(output.contains("macmini")); assert!(!output.contains(&token.secret)); } diff --git a/engine/crates/fx-cli/src/commands/reset.rs 
b/engine/crates/fx-cli/src/commands/reset.rs index 48e89b51..ea2bd51d 100644 --- a/engine/crates/fx-cli/src/commands/reset.rs +++ b/engine/crates/fx-cli/src/commands/reset.rs @@ -641,7 +641,7 @@ mod tests { #[test] fn all_reset_preserves_credentials_while_resetting_the_rest() { let fixture = ResetFixture::new( - "[http]\nbearer_token = \"keep-me\"\n\n[telegram]\nbot_token = \"keep-bot\"\n\n[[fleet.nodes]]\nid = \"node-1\"\nname = \"Node One\"\nendpoint = \"https://node.example\"\nauth_token = \"keep-token\"\nssh_key = \"~/.ssh/node-1\"\ncapabilities = [\"agentic_loop\"]\naddress = \"203.0.113.30\"\nuser = \"deploy\"\n", + "[http]\nbearer_token = \"keep-me\"\n\n[telegram]\nbot_token = \"keep-bot\"\n\n[[fleet.nodes]]\nid = \"node-1\"\nname = \"Node One\"\nendpoint = \"https://node.example\"\nauth_token = \"keep-token\"\nssh_key = \"~/.ssh/node-1\"\ncapabilities = [\"agentic_loop\"]\naddress = \"192.0.2.10\"\nuser = \"deploy\"\n", ); write_dir_file(&fixture.layout.data_dir.join("memory"), "memory.json"); write_dir_file(&fixture.layout.embedding_model_dir, "index.bin"); diff --git a/engine/crates/fx-cli/src/commands/tailscale.rs b/engine/crates/fx-cli/src/commands/tailscale.rs index 2961d874..bd02618f 100644 --- a/engine/crates/fx-cli/src/commands/tailscale.rs +++ b/engine/crates/fx-cli/src/commands/tailscale.rs @@ -106,10 +106,10 @@ mod tests { #[test] fn parse_dns_name_trims_trailing_dot() { - let hostname = parse_dns_name(br#"{"Self":{"DNSName":"node.example.ts.net."}}"#) + let hostname = parse_dns_name(br#"{"Self":{"DNSName":"fawx.example.com."}}"#) .expect("hostname should parse"); - assert_eq!(hostname, "node.example.ts.net"); + assert_eq!(hostname, "fawx.example.com"); } #[test] diff --git a/engine/crates/fx-cli/src/startup.rs b/engine/crates/fx-cli/src/startup.rs index e7b71a59..deb33a39 100644 --- a/engine/crates/fx-cli/src/startup.rs +++ b/engine/crates/fx-cli/src/startup.rs @@ -2055,12 +2055,12 @@ mod tests { fn test_fleet_node_config() -> 
fx_config::NodeConfig { fx_config::NodeConfig { id: "mac-mini".to_string(), - name: "Worker Node A".to_string(), + name: "Mac Mini".to_string(), endpoint: Some("https://10.0.0.5:8400".to_string()), auth_token: Some("token".to_string()), capabilities: vec!["agentic_loop".to_string(), "test".to_string()], address: Some("10.0.0.5".to_string()), - user: Some("builder".to_string()), + user: Some("joseph".to_string()), ssh_key: Some("~/.ssh/id_ed25519".to_string()), } } diff --git a/engine/crates/fx-config/src/lib.rs b/engine/crates/fx-config/src/lib.rs index ba81d696..4e992ba5 100644 --- a/engine/crates/fx-config/src/lib.rs +++ b/engine/crates/fx-config/src/lib.rs @@ -2057,9 +2057,9 @@ max_iterations = 10 #[test] fn tilde_expansion_does_not_expand_tilde_user() { - let path = PathBuf::from("~user/.config"); + let path = PathBuf::from("~joe/.config"); let expanded = expand_tilde(&path); - assert_eq!(expanded, PathBuf::from("~user/.config")); + assert_eq!(expanded, PathBuf::from("~joe/.config")); } #[test] diff --git a/engine/crates/fx-consensus/src/remote_workspace.rs b/engine/crates/fx-consensus/src/remote_workspace.rs index 47e42322..cbe9eb5e 100644 --- a/engine/crates/fx-consensus/src/remote_workspace.rs +++ b/engine/crates/fx-consensus/src/remote_workspace.rs @@ -407,9 +407,9 @@ mod tests { #[test] fn remote_eval_target_parses_user_host_and_path() { - let target: RemoteEvalTarget = "builder@example.com:/srv/fawx".parse().expect("target"); + let target: RemoteEvalTarget = "deploy@example.com:/srv/fawx".parse().expect("target"); - assert_eq!(target.ssh_user, "builder"); + assert_eq!(target.ssh_user, "deploy"); assert_eq!(target.ssh_host, "example.com"); assert_eq!(target.remote_project_dir, "/srv/fawx"); } @@ -425,7 +425,7 @@ mod tests { #[test] fn ssh_command_format_builds_expected_args() { - let spec = ssh_command_spec("builder", "203.0.113.20", "cd '/srv/fawx' && cargo test"); + let spec = ssh_command_spec("deploy", "192.0.2.1", "cd '/srv/fawx' && cargo test"); 
assert_eq!(spec.program, "ssh"); assert_eq!( @@ -441,7 +441,7 @@ mod tests { "ServerAliveInterval=15", "-o", "ServerAliveCountMax=3", - "builder@203.0.113.20", + "deploy@192.0.2.1", "cd '/srv/fawx' && cargo test", ] ); @@ -450,8 +450,8 @@ mod tests { #[test] fn patch_application_builds_scp_and_git_apply_commands() { let scp = scp_command_spec( - "builder", - "203.0.113.20", + "deploy", + "192.0.2.1", Path::new("/tmp/local.patch"), "/tmp/remote.patch", ); @@ -468,7 +468,7 @@ mod tests { "-o", "ConnectTimeout=30", "/tmp/local.patch", - "builder@203.0.113.20:/tmp/remote.patch", + "deploy@192.0.2.1:/tmp/remote.patch", ] ); assert_eq!( diff --git a/engine/crates/fx-fleet/src/http.rs b/engine/crates/fx-fleet/src/http.rs index 953c8f56..425d39bb 100644 --- a/engine/crates/fx-fleet/src/http.rs +++ b/engine/crates/fx-fleet/src/http.rs @@ -527,7 +527,7 @@ mod tests { fn sample_registration_request() -> FleetRegistrationRequest { FleetRegistrationRequest { - node_name: "node-a-01".to_string(), + node_name: "macmini-01".to_string(), bearer_token: "node-secret".to_string(), capabilities: vec!["generate".to_string(), "evaluate".to_string()], rust_version: Some("1.86.0".to_string()), @@ -539,7 +539,7 @@ mod tests { fn sample_heartbeat() -> FleetHeartbeat { FleetHeartbeat { - node_id: "node-a-01".to_string(), + node_id: "macmini-01".to_string(), status: WorkerState::Idle, current_task: None, } @@ -547,7 +547,7 @@ mod tests { fn sample_worker_status() -> FleetWorkerStatus { FleetWorkerStatus { - node_id: "node-a-01".to_string(), + node_id: "macmini-01".to_string(), status: WorkerState::Busy, current_task: Some("exp-001".to_string()), uptime_seconds: 42, @@ -670,7 +670,7 @@ mod tests { #[test] fn registration_debug_redacts_bearer_token() { let request = FleetRegistrationRequest { - node_name: "node-a-01".to_string(), + node_name: "macmini-01".to_string(), bearer_token: "node-secret".to_string(), capabilities: vec!["generate".to_string()], rust_version: None, diff --git 
a/engine/crates/fx-fleet/src/identity.rs b/engine/crates/fx-fleet/src/identity.rs index 3c912854..923faeed 100644 --- a/engine/crates/fx-fleet/src/identity.rs +++ b/engine/crates/fx-fleet/src/identity.rs @@ -45,8 +45,8 @@ mod tests { fn sample_identity() -> FleetIdentity { FleetIdentity { - node_id: "node-a-a1b2c3".to_string(), - primary_endpoint: "http://203.0.113.20:8400".to_string(), + node_id: "macmini-a1b2c3".to_string(), + primary_endpoint: "http://192.0.2.1:8400".to_string(), bearer_token: "tok_secret_123".to_string(), registered_at_ms: 12345, } diff --git a/engine/crates/fx-fleet/src/lib.rs b/engine/crates/fx-fleet/src/lib.rs index 856dec22..b616b53c 100644 --- a/engine/crates/fx-fleet/src/lib.rs +++ b/engine/crates/fx-fleet/src/lib.rs @@ -29,7 +29,7 @@ pub struct NodeInfo { pub node_id: String, /// Human-readable name. pub name: String, - /// HTTP API endpoint (e.g., "https://203.0.113.5:8400"). + /// HTTP API endpoint (e.g., "https://192.0.2.5:8400"). pub endpoint: String, /// Bearer token for authenticating with this node. 
pub auth_token: Option, @@ -436,19 +436,19 @@ mod tests { fn node_info_from_config_maps_fleet_fields() { let config = NodeConfig { id: "mac-mini".to_string(), - name: "Worker Node A".to_string(), + name: "Mac Mini".to_string(), endpoint: Some("https://10.0.0.5:8400".to_string()), auth_token: Some("token".to_string()), capabilities: vec!["agentic_loop".to_string(), "test".to_string()], address: Some("10.0.0.5".to_string()), - user: Some("builder".to_string()), + user: Some("joseph".to_string()), ssh_key: Some("~/.ssh/id_ed25519".to_string()), }; let node = NodeInfo::from(&config); assert_eq!(node.node_id, "mac-mini"); - assert_eq!(node.name, "Worker Node A"); + assert_eq!(node.name, "Mac Mini"); assert_eq!(node.endpoint, "https://10.0.0.5:8400"); assert_eq!(node.auth_token.as_deref(), Some("token")); assert_eq!( @@ -462,7 +462,7 @@ mod tests { assert_eq!(node.last_heartbeat_ms, 0); assert!(node.registered_at_ms > 0); assert_eq!(node.address.as_deref(), Some("10.0.0.5")); - assert_eq!(node.ssh_user.as_deref(), Some("builder")); + assert_eq!(node.ssh_user.as_deref(), Some("joseph")); assert_eq!(node.ssh_key.as_deref(), Some("~/.ssh/id_ed25519")); } diff --git a/engine/crates/fx-fleet/src/manager.rs b/engine/crates/fx-fleet/src/manager.rs index 5e2c2360..307dfa86 100644 --- a/engine/crates/fx-fleet/src/manager.rs +++ b/engine/crates/fx-fleet/src/manager.rs @@ -448,7 +448,7 @@ mod tests { let mut manager = FleetManager::init(temp_dir.path()).expect("fleet should initialize"); let token = manager - .add_node("Worker Node A", "203.0.113.10", 8400) + .add_node("Mac Mini", "198.51.100.19", 8400) .expect("node should add"); let node = manager .list_nodes() @@ -457,10 +457,10 @@ mod tests { assert_eq!(token.node_id, node.node_id); assert_ne!(token.node_id, node.name); - assert!(token.node_id.starts_with("worker-node-a-")); - assert_eq!(node.name, "Worker Node A"); - assert_eq!(node.endpoint, "https://203.0.113.10:8400"); - assert_eq!(node.address.as_deref(), 
Some("203.0.113.10")); + assert!(token.node_id.starts_with("mac-mini-")); + assert_eq!(node.name, "Mac Mini"); + assert_eq!(node.endpoint, "https://198.51.100.19:8400"); + assert_eq!(node.address.as_deref(), Some("198.51.100.19")); assert_eq!(node.status, NodeStatus::Offline); } @@ -470,9 +470,9 @@ mod tests { let mut manager = FleetManager::init(temp_dir.path()).expect("fleet should initialize"); manager - .add_node("node-a", "203.0.113.10", 8400) + .add_node("macmini", "198.51.100.19", 8400) .expect("first node should add"); - let result = manager.add_node("node-a", "203.0.113.11", 8400); + let result = manager.add_node("macmini", "198.51.100.20", 8400); assert!(matches!(result, Err(FleetError::DuplicateNode))); } @@ -482,11 +482,11 @@ mod tests { let temp_dir = TempDir::new().expect("tempdir should create"); let mut manager = FleetManager::init(temp_dir.path()).expect("fleet should initialize"); let token = manager - .add_node("node-a", "203.0.113.10", 8400) + .add_node("macmini", "198.51.100.19", 8400) .expect("node should add"); manager - .remove_node("node-a") + .remove_node("macmini") .expect("node should remove cleanly"); assert!(manager.list_nodes().is_empty()); @@ -510,7 +510,7 @@ mod tests { let temp_dir = TempDir::new().expect("tempdir should create"); let mut manager = FleetManager::init(temp_dir.path()).expect("fleet should initialize"); let token = manager - .add_node("Worker Node A", "203.0.113.10", 8400) + .add_node("Mac Mini", "198.51.100.19", 8400) .expect("node should add"); let verified = manager.verify_bearer(&token.secret); @@ -523,10 +523,10 @@ mod tests { let temp_dir = TempDir::new().expect("tempdir should create"); let mut manager = FleetManager::init(temp_dir.path()).expect("fleet should initialize"); let token = manager - .add_node("node-a", "203.0.113.10", 8400) + .add_node("macmini", "198.51.100.19", 8400) .expect("node should add"); manager - .remove_node("node-a") + .remove_node("macmini") .expect("node should remove cleanly"); let 
verified = manager.verify_bearer(&token.secret); @@ -547,7 +547,7 @@ mod tests { let temp_dir = TempDir::new().expect("tempdir should create"); let mut manager = FleetManager::init(temp_dir.path()).expect("fleet should initialize"); let token = manager - .add_node("node-a", "203.0.113.10", 8400) + .add_node("macmini", "198.51.100.19", 8400) .expect("node should add"); let node = manager @@ -571,7 +571,7 @@ mod tests { let temp_dir = TempDir::new().expect("tempdir should create"); let mut manager = FleetManager::init(temp_dir.path()).expect("fleet should initialize"); let token = manager - .add_node("node-a", "203.0.113.10", 8400) + .add_node("macmini", "198.51.100.19", 8400) .expect("node should add"); manager @@ -592,7 +592,7 @@ mod tests { let temp_dir = TempDir::new().expect("tempdir should create"); let mut manager = FleetManager::init(temp_dir.path()).expect("fleet should initialize"); let token = manager - .add_node("node-a", "203.0.113.10", 8400) + .add_node("macmini", "198.51.100.19", 8400) .expect("node should add"); manager .record_worker_heartbeat(&token.node_id, NodeStatus::Busy, 100) @@ -617,19 +617,19 @@ mod tests { let fleet_dir = temp_dir.path().join("fleet"); let mut manager = FleetManager::init(&fleet_dir).expect("fleet should initialize"); let active = manager - .add_node("node-a", "203.0.113.10", 8400) + .add_node("macmini", "198.51.100.19", 8400) .expect("first node should add"); let revoked = manager - .add_node("node-b", "203.0.113.11", 8401) + .add_node("macbook", "198.51.100.20", 8401) .expect("second node should add"); manager - .remove_node("node-b") + .remove_node("macbook") .expect("node should remove cleanly"); let loaded = FleetManager::load(&fleet_dir).expect("fleet should load"); let node_names = sorted_node_names(loaded.list_nodes()); - assert_eq!(node_names, vec!["node-a".to_string()]); + assert_eq!(node_names, vec!["macmini".to_string()]); assert_eq!( loaded.verify_bearer(&active.secret).as_deref(), Some(active.node_id.as_str()) 
@@ -648,7 +648,7 @@ mod tests { let mut manager = FleetManager::init(temp_dir.path()).expect("fleet should initialize"); manager - .add_node("Worker Node A", "203.0.113.10", 8400) + .add_node("Mac Mini", "198.51.100.19", 8400) .expect("node should add"); assert_private_permissions(&nodes_path(temp_dir.path())); @@ -682,15 +682,15 @@ mod tests { let temp_dir = TempDir::new().expect("tempdir should create"); let mut manager = FleetManager::init(temp_dir.path()).expect("fleet should initialize"); manager - .add_node("node-a", "203.0.113.10", 8400) + .add_node("macmini", "198.51.100.19", 8400) .expect("first node should add"); manager - .add_node("node-b", "203.0.113.11", 8401) + .add_node("macbook", "198.51.100.20", 8401) .expect("second node should add"); let names = sorted_node_names(manager.list_nodes()); - assert_eq!(names, vec!["node-a".to_string(), "node-b".to_string()]); + assert_eq!(names, vec!["macbook".to_string(), "macmini".to_string()]); } fn sorted_node_names(nodes: Vec<&NodeInfo>) -> Vec { diff --git a/engine/crates/fx-kernel/src/context_manager.rs b/engine/crates/fx-kernel/src/context_manager.rs index 211803df..8c509b12 100644 --- a/engine/crates/fx-kernel/src/context_manager.rs +++ b/engine/crates/fx-kernel/src/context_manager.rs @@ -405,7 +405,7 @@ mod tests { version: 1, }], identity_context: IdentityContext { - user_name: Some("Example User".to_owned()), + user_name: Some("Joe".to_owned()), preferences, personality_traits: vec!["focused".to_owned(), "concise".to_owned()], }, diff --git a/engine/crates/fx-kernel/src/loop_engine.rs b/engine/crates/fx-kernel/src/loop_engine.rs index 7b5b731c..9166aac4 100644 --- a/engine/crates/fx-kernel/src/loop_engine.rs +++ b/engine/crates/fx-kernel/src/loop_engine.rs @@ -1343,7 +1343,7 @@ Never narrate your process, hedge with qualifiers, or reference tool mechanics. Avoid filler openers like \"I notice\", \"I can see that\", \"Based on the results\", \ \"It appears that\", \"Let me\", or \"I aim to\". 
Just answer the question. \ If the user makes a statement (not a question), acknowledge it naturally and briefly. \ -If a tool call stores data (like memory_write), confirm the action in one short sentence. You are Fawx, a TUI-first agentic engine built in Rust. You were created by Fawx AI. Your architecture separates an immutable safety kernel from a loadable intelligence layer: the kernel enforces hard security boundaries that you cannot override at runtime. You are designed to be self-extending through a WASM plugin system. \ +If a tool call stores data (like memory_write), confirm the action in one short sentence. You are Fawx, a TUI-first agentic engine built in Rust. You were created by Joe. Your architecture separates an immutable safety kernel from a loadable intelligence layer: the kernel enforces hard security boundaries that you cannot override at runtime. You are designed to be self-extending through a WASM plugin system. \ Your source code is at ~/fawx. Your config is at ~/.fawx/config.toml. \ Your data (conversations, memory) is at the data_dir set in config. \ Your conversation history is stored as JSONL files in the data directory. \ @@ -1623,7 +1623,7 @@ impl LoopEngine { let mut state = CycleState::default(); let stream = stream_callback.map_or_else(CycleStream::disabled, CycleStream::enabled); - // Single pass — all tool chaining happens inside act_with_tools. + // Multi-pass: loops until model stops using tools. 
self.iteration_count = 1; self.refresh_iteration_state(); @@ -1635,7 +1635,7 @@ impl LoopEngine { } stream.phase(Phase::Perceive); - let processed = self.perceive(&perception).await?; + let mut processed = self.perceive(&perception).await?; let reason_cost = self.estimate_reasoning_cost(&processed); if let Some(result) = self.budget_terminal(reason_cost, None) { return Ok(self.finish_streaming_result(result, stream)); @@ -1645,49 +1645,126 @@ impl LoopEngine { let response = self.reason(&processed, llm, stream).await?; self.record_reasoning_cost(reason_cost, &mut state); - let decision = self.decide(&response).await?; + let mut decision = self.decide(&response).await?; if let Some(result) = self.budget_terminal(self.estimate_action_cost(&decision), None) { return Ok(self.finish_streaming_result(result, stream)); } - stream.phase(Phase::Act); - let action = self - .act(&decision, llm, &processed.context_window, stream) - .await?; + loop { + stream.phase(Phase::Act); + let action = self + .act(&decision, llm, &processed.context_window, stream) + .await?; - // Budget accounting for non-tool actions. - if action.tool_results.is_empty() { - let action_cost = self.action_cost_from_result(&action); - if let Some(result) = - self.budget_terminal(action_cost, Some(action.response_text.clone())) + // Budget accounting for non-tool actions. 
+ if action.tool_results.is_empty() { + let action_cost = self.action_cost_from_result(&action); + if let Some(result) = + self.budget_terminal(action_cost, Some(action.response_text.clone())) + { + return Ok(self.finish_budget_exhausted(result, llm, stream).await); + } + self.budget.record(&action_cost); + } else if let Some(result) = + self.budget_terminal(ActionCost::default(), Some(action.response_text.clone())) { return Ok(self.finish_budget_exhausted(result, llm, stream).await); } - self.budget.record(&action_cost); - } else if let Some(result) = - self.budget_terminal(ActionCost::default(), Some(action.response_text.clone())) - { - return Ok(self.finish_budget_exhausted(result, llm, stream).await); - } - state.tokens.accumulate(action.tokens_used); - self.update_tool_turns(&action); + state.tokens.accumulate(action.tokens_used); + self.update_tool_turns(&action); - if let Some(result) = self.check_cancellation(Some(action.response_text.clone())) { - return Ok(self.finish_streaming_result(result, stream)); - } + if let Some(result) = self.check_cancellation(Some(action.response_text.clone())) { + return Ok(self.finish_streaming_result(result, stream)); + } - self.emit_action_observations(&action); + self.emit_action_observations(&action); + + // CONTINUATION CHECK: if tools were used, the model may have more work. + // Re-prompt to let it decide. If no tools were used, it's done. + if action.tool_results.is_empty() { + // Text-only response, no tools involved. Model is done. + return Ok(self.finish_streaming_result( + LoopResult::Complete { + response: action.response_text, + iterations: self.iteration_count, + tokens_used: state.tokens, + signals: Vec::new(), + }, + stream, + )); + } - Ok(self.finish_streaming_result( - LoopResult::Complete { - response: action.response_text, - iterations: self.iteration_count, - tokens_used: state.tokens, - signals: Vec::new(), - }, - stream, - )) + // Tools were used. 
Check max before incrementing so the + // reported iteration count is accurate (not inflated by 1). + if self.iteration_count >= self.max_iterations { + // Safety cap reached. Return what we have. + return Ok(self.finish_streaming_result( + LoopResult::Complete { + response: action.response_text, + iterations: self.iteration_count, + tokens_used: state.tokens, + signals: Vec::new(), + }, + stream, + )); + } + self.iteration_count += 1; + + self.refresh_iteration_state(); + + // Append a summary of what happened to the context window so + // the next reason() call sees the model's tool results. Without + // this the model would be re-prompted with stale context. + // NOTE: each continuation iteration adds one assistant message. + // Bounded by max_iterations (default 10), so growth is small. + // + // We build a compact assistant message with the synthesis text + // (which already summarizes tool outputs) rather than replaying + // every tool call/result message, because act_with_tools may + // have run multiple inner rounds with different call IDs that + // don't map 1:1 to the original Decision::UseTools calls. + if !action.response_text.is_empty() { + processed + .context_window + .push(Message::assistant(action.response_text.clone())); + } else { + // Tools ran but no synthesis text — include tool names so the + // model knows which tools executed when deciding next steps. 
+ let tool_names: Vec<&str> = action + .tool_results + .iter() + .map(|r| r.tool_name.as_str()) + .collect(); + let placeholder = if tool_names.is_empty() { + "Tool execution completed.".to_string() + } else { + format!("Tool execution completed: {}", tool_names.join(", ")) + }; + processed + .context_window + .push(Message::assistant(placeholder)); + } + + let reason_cost = self.estimate_reasoning_cost(&processed); + if let Some(result) = + self.budget_terminal(reason_cost, Some(action.response_text.clone())) + { + return Ok(self.finish_budget_exhausted(result, llm, stream).await); + } + + // No re-perceive needed; context_window was updated in-place above. + stream.phase(Phase::Reason); + let response = self.reason(&processed, llm, stream).await?; + self.record_reasoning_cost(reason_cost, &mut state); + + decision = self.decide(&response).await?; + if let Some(result) = self.budget_terminal(self.estimate_action_cost(&decision), None) { + return Ok(self.finish_streaming_result(result, stream)); + } + + // Loop back to act with new decision + } } /// Handle BudgetExhausted results with optional forced synthesis. 
@@ -6824,6 +6901,7 @@ mod phase2_tests { // First response: LLM returns a tool call // Second response: LLM synthesizes the tool results into a final answer + // Third response: continuation re-prompt gets text-only, ending the outer loop let llm = SequentialMockLlm::new(vec![ CompletionResponse { content: Vec::new(), @@ -6843,6 +6921,15 @@ mod phase2_tests { usage: None, stop_reason: None, }, + // Outer loop continuation: model re-prompted, responds text-only + CompletionResponse { + content: vec![ContentBlock::Text { + text: "README loaded".to_string(), + }], + tool_calls: Vec::new(), + usage: None, + stop_reason: None, + }, ]); let result = engine @@ -6857,10 +6944,11 @@ mod phase2_tests { } #[tokio::test] - async fn run_cycle_completes_in_one_iteration_when_tool_fails_but_synthesis_exists() { + async fn run_cycle_completes_after_tool_fails_with_synthesis() { let mut engine = failing_tool_engine(); let llm = SequentialMockLlm::new(vec![ + // reason: LLM returns a tool call CompletionResponse { content: Vec::new(), tool_calls: vec![ToolCall { @@ -6871,6 +6959,16 @@ mod phase2_tests { usage: None, stop_reason: Some("tool_use".to_string()), }, + // act_with_tools re-prompt: LLM synthesizes tool failure + CompletionResponse { + content: vec![ContentBlock::Text { + text: "The file could not be read: path escapes working directory.".to_string(), + }], + tool_calls: Vec::new(), + usage: None, + stop_reason: None, + }, + // outer loop continuation: re-prompted model responds text-only CompletionResponse { content: vec![ContentBlock::Text { text: "The file could not be read: path escapes working directory.".to_string(), @@ -6892,7 +6990,11 @@ mod phase2_tests { iterations, .. 
} => { - assert_eq!(iterations, 1, "expected exactly one iteration"); + // iteration 1: tool call + synthesis, iteration 2: continuation text-only + assert_eq!( + iterations, 2, + "expected two iterations (tool + continuation)" + ); assert_eq!( response, "The file could not be read: path escapes working directory." @@ -7178,6 +7280,15 @@ mod phase2_tests { usage: None, stop_reason: None, }, + // Outer loop continuation: text-only response ends the loop + CompletionResponse { + content: vec![ContentBlock::Text { + text: "done".to_string(), + }], + tool_calls: Vec::new(), + usage: None, + stop_reason: None, + }, ]); let result = engine @@ -7238,6 +7349,15 @@ mod phase2_tests { }), stop_reason: None, }, + // Outer loop continuation: text-only response ends the loop + CompletionResponse { + content: vec![ContentBlock::Text { + text: "done".to_string(), + }], + tool_calls: Vec::new(), + usage: None, + stop_reason: None, + }, ]); let result = engine @@ -7292,6 +7412,15 @@ mod phase2_tests { usage: None, stop_reason: None, }, + // Outer loop continuation: text-only response ends the loop + CompletionResponse { + content: vec![ContentBlock::Text { + text: SAFE_FALLBACK_RESPONSE.to_string(), + }], + tool_calls: Vec::new(), + usage: None, + stop_reason: None, + }, ]); let result = engine @@ -7537,6 +7666,8 @@ mod phase2_tests { ), text_response("Tool answer part", Some("length"), None), text_response(" two", Some("stop"), None), + // Outer loop continuation: text-only response ends the loop + text_response("Tool answer part two", None, None), ]); let result = engine @@ -7545,7 +7676,7 @@ mod phase2_tests { .expect("run_cycle should succeed"); let (response, iterations, _) = expect_complete(result); - assert_eq!(iterations, 1); + assert_eq!(iterations, 2); assert_eq!(response, "Tool answer part two"); } @@ -7567,6 +7698,8 @@ mod phase2_tests { }, text_response(" and summarize it", Some("stop"), None), text_response("tool executed", Some("stop"), None), + // Outer loop 
continuation: text-only response ends the loop + text_response("tool executed", None, None), ]); let result = engine @@ -7599,6 +7732,8 @@ mod phase2_tests { ), text_response(&first, Some("max_tokens"), None), text_response(&second, Some("stop"), None), + // Outer loop continuation: text-only response ends the loop + text_response(&expected, None, None), ]); let result = engine @@ -7634,6 +7769,8 @@ mod phase2_tests { ), text_response("Act part", Some("length"), None), text_response(" complete", Some("stop"), None), + // Outer loop continuation: text-only response ends the loop + text_response("Act part complete", None, None), ]); let act_result = act_engine @@ -9394,7 +9531,12 @@ mod cancellation_tests { #[tokio::test] async fn run_cycle_streaming_emits_tool_events_and_synthesize_phase() { let mut engine = engine_with_executor(Arc::new(NoopToolExecutor), 3); - let llm = ScriptedLlm::new(vec![tool_use_response("call-1"), text_response("done")]); + // Third response: outer loop continuation re-prompt returns text-only + let llm = ScriptedLlm::new(vec![ + tool_use_response("call-1"), + text_response("done"), + text_response("done"), + ]); let (callback, events) = stream_recorder(); let result = engine @@ -16968,6 +17110,8 @@ mod error_path_coverage_tests { let llm = ScriptedLlm::ok(vec![ tool_use_response(vec![read_file_call("call-1")]), text_response("I was unable to read the file due to an error."), + // Outer loop continuation: text-only response ends the loop + text_response("I was unable to read the file due to an error."), ]); let result = engine @@ -16981,9 +17125,10 @@ mod error_path_coverage_tests { iterations, .. 
} => { + // iteration 1: tool call + synthesis, iteration 2: continuation text-only assert_eq!( - *iterations, 1, - "should complete in 1 iteration, not retry: got {iterations}" + *iterations, 2, + "expected two iterations (tool + continuation): got {iterations}" ); assert!( response.contains("unable to read") || response.contains("error"), @@ -17006,12 +17151,14 @@ mod error_path_coverage_tests { 2, // Only 2 iterations ); - // Only script the responses that will actually be consumed in 2 - // iterations: tool call → failure → tool call → failure → synthesis. + // Responses: reason (tool_use) → act_with_tools chains (tool_use → text) + // → outer loop continuation: reason (text-only) → act (text-only, exits) let llm = ScriptedLlm::ok(vec![ tool_use_response(vec![read_file_call("call-1")]), tool_use_response(vec![read_file_call("call-2")]), text_response("tools keep failing"), + // Outer loop continuation + text_response("tools keep failing"), ]); let result = engine @@ -17058,6 +17205,8 @@ mod error_path_coverage_tests { let llm = ScriptedLlm::ok(vec![ tool_use_response(vec![read_file_call("call-1")]), text_response("synthesized"), + // Outer loop continuation: text-only response ends the loop + text_response("synthesized"), ]); let result = engine diff --git a/engine/crates/fx-kernel/src/perceive.rs b/engine/crates/fx-kernel/src/perceive.rs index 1c0b2d58..d87aa7d6 100644 --- a/engine/crates/fx-kernel/src/perceive.rs +++ b/engine/crates/fx-kernel/src/perceive.rs @@ -597,7 +597,7 @@ mod tests { preferences.insert("tone".to_owned(), "concise".to_owned()); IdentityContext { - user_name: Some("Example User".to_owned()), + user_name: Some("Joe".to_owned()), preferences, personality_traits: vec!["helpful".to_owned()], } diff --git a/engine/crates/fx-kernel/src/reason.rs b/engine/crates/fx-kernel/src/reason.rs index 84c40f1f..31bd7060 100644 --- a/engine/crates/fx-kernel/src/reason.rs +++ b/engine/crates/fx-kernel/src/reason.rs @@ -468,7 +468,7 @@ mod tests { version: 
2, }], identity_context: IdentityContext { - user_name: Some("Example User".to_owned()), + user_name: Some("Joe".to_owned()), preferences, personality_traits: vec!["helpful".to_owned()], }, @@ -497,9 +497,7 @@ mod tests { .contains("Goal: Draft and send a reply")); assert!(prompt.messages[0].content.contains("last_contact = Alex")); assert!(prompt.messages[0].content.contains("Identity context:")); - assert!(prompt.messages[0] - .content - .contains("User name: Example User")); + assert!(prompt.messages[0].content.contains("User name: Joe")); assert!(prompt.messages[0].content.contains("tone: direct")); assert!(prompt.messages[0] .content diff --git a/engine/crates/fx-kernel/src/system_prompt.rs b/engine/crates/fx-kernel/src/system_prompt.rs index b0402017..19f9211e 100644 --- a/engine/crates/fx-kernel/src/system_prompt.rs +++ b/engine/crates/fx-kernel/src/system_prompt.rs @@ -423,7 +423,7 @@ mod tests { restricted: vec!["kernel_modify".to_string()], working_dir: "/workspace".to_string(), }) - .user_context("Prefers short answers.") + .user_context("Joe prefers short answers.") .surface(Surface::HeadlessApi) .session(SessionContext { is_new: false, @@ -437,7 +437,7 @@ mod tests { "Behavioral:\nKeep answers grounded in evidence.", "Capabilities:\n- web_fetch: Fetch a web page", "Security:\n- Mode: capability\n- Restricted: kernel_modify\n- Working directory: /workspace", - "User context:\nPrefers short answers.", + "User context:\nJoe prefers short answers.", "Surface: Headless API. 
Return plain content without UI-specific references.", "Session:\n- State: continuing\n- Message count: 3\n- Recent summary: Reviewed deployment notes.", "Directives:\n- Return machine-readable content when asked.", diff --git a/engine/crates/fx-kernel/src/types.rs b/engine/crates/fx-kernel/src/types.rs index 040b08e4..7c303046 100644 --- a/engine/crates/fx-kernel/src/types.rs +++ b/engine/crates/fx-kernel/src/types.rs @@ -429,7 +429,7 @@ mod tests { relevant_semantic: vec![], active_procedures: vec![], identity_context: IdentityContext { - user_name: Some("Example User".to_owned()), + user_name: Some("Joe".to_owned()), preferences: parent_preferences, personality_traits: vec!["concise".to_owned()], }, @@ -465,7 +465,7 @@ mod tests { relevant_semantic: vec![], active_procedures: vec![], identity_context: IdentityContext { - user_name: Some("Example User".to_owned()), + user_name: Some("Joe".to_owned()), preferences: child_preferences, personality_traits: vec!["focused".to_owned()], }, @@ -603,7 +603,7 @@ mod tests { preferences.insert("lang".to_owned(), "en".to_owned()); let identity = IdentityContext { - user_name: Some("Example User".to_owned()), + user_name: Some("Joe".to_owned()), preferences, personality_traits: vec!["friendly".to_owned()], }; diff --git a/engine/crates/fx-llm/Cargo.toml b/engine/crates/fx-llm/Cargo.toml index eb2429b2..f56295d2 100644 --- a/engine/crates/fx-llm/Cargo.toml +++ b/engine/crates/fx-llm/Cargo.toml @@ -7,6 +7,7 @@ license.workspace = true [features] default = [] +llama-cpp = [] # Enable local LLM integration [dependencies] async-trait.workspace = true @@ -15,6 +16,7 @@ bytes = "1" fx-core.workspace = true futures.workspace = true http.workspace = true +# local LLM backend (not included in OSS release) pdf-extract = "0.7" reqwest.workspace = true serde.workspace = true diff --git a/engine/crates/fx-llm/src/local.rs b/engine/crates/fx-llm/src/local.rs index ecca10f8..a40b648b 100644 --- a/engine/crates/fx-llm/src/local.rs +++ 
b/engine/crates/fx-llm/src/local.rs @@ -1,9 +1,4 @@ -//! Local LLM provider implementation. -//! -//! Stub implementation for on-device inference. The `llama-cpp-sys` FFI -//! dependency was removed during open-source extraction; this module -//! preserves the public API surface so downstream crates compile, but -//! all inference calls return an error at runtime. +//! Local LLM provider implementation using llama.cpp. use async_trait::async_trait; use fx_core::error::LlmError; @@ -11,12 +6,15 @@ use tracing::{debug, warn}; use crate::{LlmProvider, LocalModelConfig}; -/// Local LLM provider (stub). +/// Local LLM provider using llama.cpp for on-device inference. /// -/// Inference is not yet available; all `generate` calls return an error. +/// This struct wraps the unsafe local LLM FFI bindings and provides +/// a safe, async Rust API. #[derive(Debug)] pub struct LocalModel { config: LocalModelConfig, + // Future: Add actual llama.cpp context handle + // context: Option<*mut llama_context>, } impl LocalModel { @@ -32,10 +30,12 @@ impl LocalModel { /// - `LlmError::Model`: Configuration is invalid /// - `LlmError::Inference`: Model file doesn't exist or can't be loaded pub fn new(config: LocalModelConfig) -> Result { + // Validate config (already validated in LocalModelConfig::new, but double-check) if config.context_size == 0 { return Err(LlmError::Model("context_size must be > 0".to_string())); } + // Check model file exists if !config.model_path.exists() { warn!("Model file does not exist: {}", config.model_path.display()); return Err(LlmError::Model(format!( @@ -44,16 +44,36 @@ impl LocalModel { ))); } - debug!("LocalModel created (stub); inference will fail at runtime"); + #[cfg(not(feature = "llama-cpp"))] + { + debug!("LocalModel created without llama-cpp feature; inference will fail at runtime"); + } Ok(Self { config }) } - /// Stub inference method. + /// Internal method to perform actual inference. 
+ /// + /// This is where llama.cpp FFI calls would happen. + #[allow(dead_code)] + #[cfg(feature = "llama-cpp")] + fn infer_internal(&self, _prompt: &str, _max_tokens: u32) -> Result { + // Future: Actual llama.cpp inference + // 1. Tokenize prompt + // 2. Run inference loop + // 3. Decode tokens to string + // 4. Return result + + Err(LlmError::Inference( + "llama.cpp integration not yet implemented".to_string(), + )) + } + #[allow(dead_code)] + #[cfg(not(feature = "llama-cpp"))] fn infer_internal(&self, _prompt: &str, _max_tokens: u32) -> Result { Err(LlmError::Model( - "local inference not available; llama-cpp backend was removed".to_string(), + "llama-cpp feature not enabled; cannot perform local inference".to_string(), )) } } @@ -67,10 +87,26 @@ impl LlmProvider for LocalModel { max_tokens ); + // Run inference in blocking task (llama.cpp is CPU-bound) + let _config = self.config.clone(); + let _prompt = prompt.to_string(); + tokio::task::spawn_blocking(move || { - Err(LlmError::Model( - "local inference not available; llama-cpp backend was removed".to_string(), - )) + // Placeholder: would call self.infer_internal here + // For now, return error since feature is not enabled + #[cfg(feature = "llama-cpp")] + { + Err(LlmError::Inference( + "llama.cpp integration not yet implemented".to_string(), + )) + } + + #[cfg(not(feature = "llama-cpp"))] + { + Err(LlmError::Model( + "llama-cpp feature not enabled; cannot perform local inference".to_string(), + )) + } }) .await .map_err(|e| LlmError::Inference(format!("Task join error: {}", e)))? 
@@ -87,6 +123,7 @@ impl LlmProvider for LocalModel { prompt.len() ); + // For now, fall back to non-streaming and call callback once let result = self.generate(prompt, max_tokens).await?; callback(result.clone()); Ok(result) @@ -124,6 +161,7 @@ mod tests { #[test] fn test_model_name_extraction() { + // Create a temp file for testing let temp_dir = std::env::temp_dir(); let model_path = temp_dir.join("test-model.gguf"); std::fs::write(&model_path, b"fake model").unwrap(); @@ -133,11 +171,13 @@ assert_eq!(model.model_name(), "test-model.gguf"); + // Cleanup std::fs::remove_file(&model_path).ok(); } #[tokio::test] - async fn test_generate_returns_error() { + async fn test_generate_without_feature() { + // Create a temp file let temp_dir = std::env::temp_dir(); let model_path = temp_dir.join("test-model-2.gguf"); std::fs::write(&model_path, b"fake model").unwrap(); @@ -146,13 +186,20 @@ let model = LocalModel::new(config).unwrap(); let result = model.generate("test prompt", 10).await; + + // Without llama-cpp feature, should error + #[cfg(not(feature = "llama-cpp"))] assert!(result.is_err()); + // Cleanup std::fs::remove_file(&model_path).ok(); } #[tokio::test] - async fn test_streaming_falls_back_to_generate() { + async fn test_streaming_callback_signature() { + // This test verifies the streaming API signature works correctly + // without requiring actual model inference + let temp_dir = std::env::temp_dir(); let model_path = temp_dir.join("test-model-streaming.gguf"); std::fs::write(&model_path, b"fake model").unwrap(); @@ -160,10 +207,19 @@ let config = LocalModelConfig::new(model_path.clone(), 2048, 0.7, 0.95, 512).unwrap(); let model = LocalModel::new(config).unwrap(); - let callback = Box::new(|_chunk: String| {}); + // Verify callback accepts owned String (not &str) + let callback = Box::new(|chunk: String| { + // In real use, this would send chunk to a channel/stream + let _ = chunk; // consume the chunk; nothing meaningful to assert per-chunk + }); + let result = model.generate_streaming("test", 10, callback).await; + + // Without llama-cpp feature, generate() fails, so streaming also fails + #[cfg(not(feature = "llama-cpp"))] assert!(result.is_err()); + // Cleanup std::fs::remove_file(&model_path).ok(); } } diff --git a/engine/crates/fx-security/src/policy/tests.rs b/engine/crates/fx-security/src/policy/tests.rs index 36b2700b..35220de2 100644 --- a/engine/crates/fx-security/src/policy/tests.rs +++ b/engine/crates/fx-security/src/policy/tests.rs @@ -519,10 +519,10 @@ decision = "deny" #[test] fn test_condition_contact_target_partial_eq() { let cond1 = Condition::ContactTarget { - contact: "owner".to_string(), + contact: "joe".to_string(), }; let cond2 = Condition::ContactTarget { - contact: "owner".to_string(), + contact: "joe".to_string(), }; let cond3 = Condition::ContactTarget { contact: "alice".to_string(), diff --git a/engine/crates/fx-tools/src/node_run.rs b/engine/crates/fx-tools/src/node_run.rs index a14f0461..95caf307 100644 --- a/engine/crates/fx-tools/src/node_run.rs +++ b/engine/crates/fx-tools/src/node_run.rs @@ -264,11 +264,11 @@ mod tests { #[tokio::test] async fn resolves_node_by_name() { let transport = Arc::new(MockTransport::succeeding("ok\n")); - let state = make_state(vec![make_node("n1", "Worker Node A")], transport.clone()); + let state = make_state(vec![make_node("n1", "Mac Mini")], transport.clone()); let result = handle_node_run( &state, - &serde_json::json!({"node": "Worker Node A", "command": "ls"}), + &serde_json::json!({"node": "Mac Mini", "command": "ls"}), ) .await .expect("should resolve by name"); @@ -281,11 +281,11 @@ #[tokio::test] async fn resolves_node_name_case_insensitive() { let transport = Arc::new(MockTransport::succeeding("ok\n")); - let state = make_state(vec![make_node("n1", "Worker Node B")], transport.clone()); + let state = make_state(vec![make_node("n1", "MacBook Pro")], transport.clone()); let result = 
handle_node_run( &state, - &serde_json::json!({"node": "worker node b", "command": "ls"}), + &serde_json::json!({"node": "macbook pro", "command": "ls"}), ) .await;