diff --git a/.github/workflows/ci-macos-compat.yml b/.github/workflows/ci-macos-compat.yml index a008ea3a0..2a25d6d3c 100644 --- a/.github/workflows/ci-macos-compat.yml +++ b/.github/workflows/ci-macos-compat.yml @@ -15,13 +15,15 @@ jobs: - os: warp-macos-15-arm64-6x timeout: 30 smoke: true - skip_zig: false + skip_zig: true - os: warp-macos-26-arm64-6x timeout: 30 smoke: false skip_zig: true # zig 0.15.2 MachO linker can't resolve libSystem on macOS 26 runs-on: ${{ matrix.os }} timeout-minutes: ${{ matrix.timeout }} + env: + CMUX_SKIP_ZIG_BUILD: ${{ matrix.skip_zig && '1' || '0' }} steps: - name: Checkout uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4 @@ -119,8 +121,6 @@ jobs: done - name: Run unit tests - env: - CMUX_SKIP_ZIG_BUILD: ${{ matrix.skip_zig && '1' || '0' }} run: | set -euo pipefail SOURCE_PACKAGES_DIR="$PWD/.ci-source-packages" diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 13581c5e5..116e2b541 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -40,15 +40,24 @@ jobs: remote-daemon-tests: runs-on: ubuntu-latest + env: + CGO_ENABLED: "0" steps: - name: Checkout uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4 + with: + submodules: recursive - name: Setup Go uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0 with: go-version-file: daemon/remote/go.mod + - name: Install tmux + run: | + sudo apt-get update + sudo apt-get install -y tmux + - name: Install zig run: | set -euo pipefail @@ -65,6 +74,14 @@ jobs: zig version fi + - name: Prepare Ghostty shim dependency + working-directory: daemon/remote/rust/ghostty-shim + run: ln -sfn ../../../../ghostty ghostty + + - name: Run Ghostty shim tests + working-directory: daemon/remote/rust/ghostty-shim + run: zig build test + - name: Run remote daemon tests working-directory: daemon/remote run: go test ./... 
@@ -72,6 +89,107 @@ jobs: - name: Validate remote daemon release assets run: ./tests/test_remote_daemon_release_assets.sh + remote-daemon-tmux-parity: + runs-on: ubuntu-latest + env: + CGO_ENABLED: "0" + CMUX_REMOTE_TEST_ARTIFACT_DIR: /tmp/remote-daemon-tmux-parity + steps: + - name: Checkout + uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4 + with: + submodules: recursive + + - name: Setup Go + uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0 + with: + go-version-file: daemon/remote/go.mod + + - name: Install tmux + run: | + sudo apt-get update + sudo apt-get install -y tmux + + - name: Install zig + run: | + set -euo pipefail + ZIG_REQUIRED="0.15.2" + if command -v zig >/dev/null 2>&1 && zig version 2>/dev/null | grep -q "^${ZIG_REQUIRED}"; then + echo "zig ${ZIG_REQUIRED} already installed" + else + echo "Installing zig ${ZIG_REQUIRED} from tarball" + curl -fSL "https://ziglang.org/download/${ZIG_REQUIRED}/zig-x86_64-linux-${ZIG_REQUIRED}.tar.xz" -o /tmp/zig.tar.xz + tar xf /tmp/zig.tar.xz -C /tmp + sudo mv /tmp/zig-x86_64-linux-${ZIG_REQUIRED}/zig /usr/local/bin/zig + sudo rm -rf /usr/local/lib/zig + sudo mv /tmp/zig-x86_64-linux-${ZIG_REQUIRED}/lib /usr/local/lib/zig + zig version + fi + + - name: Prepare Ghostty shim dependency + working-directory: daemon/remote/rust/ghostty-shim + run: ln -sfn ../../../../ghostty ghostty + + - name: Run tmux parity regression + working-directory: daemon/remote + run: go test ./compat -run TestTmuxParityCommonCommands -count=1 -v + + - name: Upload tmux parity artifacts + if: failure() + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4 + with: + name: remote-daemon-tmux-parity-artifacts + path: /tmp/remote-daemon-tmux-parity + if-no-files-found: ignore + + remote-daemon-tui-resize: + runs-on: ubuntu-latest + env: + CGO_ENABLED: "0" + CMUX_REMOTE_TEST_ARTIFACT_DIR: /tmp/remote-daemon-tui-resize + steps: + - name: Checkout + uses: 
actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4 + with: + submodules: recursive + + - name: Setup Go + uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0 + with: + go-version-file: daemon/remote/go.mod + + - name: Install zig + run: | + set -euo pipefail + ZIG_REQUIRED="0.15.2" + if command -v zig >/dev/null 2>&1 && zig version 2>/dev/null | grep -q "^${ZIG_REQUIRED}"; then + echo "zig ${ZIG_REQUIRED} already installed" + else + echo "Installing zig ${ZIG_REQUIRED} from tarball" + curl -fSL "https://ziglang.org/download/${ZIG_REQUIRED}/zig-x86_64-linux-${ZIG_REQUIRED}.tar.xz" -o /tmp/zig.tar.xz + tar xf /tmp/zig.tar.xz -C /tmp + sudo mv /tmp/zig-x86_64-linux-${ZIG_REQUIRED}/zig /usr/local/bin/zig + sudo rm -rf /usr/local/lib/zig + sudo mv /tmp/zig-x86_64-linux-${ZIG_REQUIRED}/lib /usr/local/lib/zig + zig version + fi + + - name: Prepare Ghostty shim dependency + working-directory: daemon/remote/rust/ghostty-shim + run: ln -sfn ../../../../ghostty ghostty + + - name: Run TUI and resize regression + working-directory: daemon/remote + run: go test ./compat -run 'TestSessionAttachPropagatesPTYResize|TestSessionAttachTUIResizeAndReattach' -count=1 -v + + - name: Upload TUI and resize artifacts + if: failure() + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4 + with: + name: remote-daemon-tui-resize-artifacts + path: /tmp/remote-daemon-tui-resize + if-no-files-found: ignore + web-typecheck: runs-on: ubuntu-latest defaults: @@ -92,6 +210,8 @@ jobs: tests: runs-on: warp-macos-15-arm64-6x + env: + CMUX_SKIP_ZIG_BUILD: "1" timeout-minutes: 30 steps: - name: Checkout @@ -130,6 +250,7 @@ jobs: ./scripts/download-prebuilt-ghosttykit.sh - name: Install zig + if: env.CMUX_SKIP_ZIG_BUILD != '1' run: | ZIG_REQUIRED="0.15.2" if command -v zig >/dev/null 2>&1 && zig version 2>/dev/null | grep -q "^${ZIG_REQUIRED}"; then @@ -250,12 +371,78 @@ jobs: CMUX_CLI_BIN="$CLI_BIN" python3 tests/test_cli_version_memory_guard.py + 
- name: Run cmux pty bridge regression + run: | + set -euo pipefail + + if ! command -v zig >/dev/null 2>&1; then + ZIG_REQUIRED="0.15.2" + curl -fSL "https://ziglang.org/download/${ZIG_REQUIRED}/zig-aarch64-macos-${ZIG_REQUIRED}.tar.xz" -o /tmp/zig.tar.xz + tar xf /tmp/zig.tar.xz -C /tmp + sudo mkdir -p /usr/local/bin /usr/local/lib + sudo cp -f /tmp/zig-aarch64-macos-${ZIG_REQUIRED}/zig /usr/local/bin/zig + sudo cp -rf /tmp/zig-aarch64-macos-${ZIG_REQUIRED}/lib /usr/local/lib/zig + export PATH="/usr/local/bin:$PATH" + fi + + CLI_BIN="" + while IFS= read -r candidate; do + if "$candidate" help 2>/dev/null | grep -q '^ pty'; then + CLI_BIN="$candidate" + break + fi + done < <( + find "$HOME/Library/Developer/Xcode/DerivedData" -path "*/Build/Products/Debug/cmux" -exec stat -f '%m %N' {} \; \ + | sort -nr \ + | cut -d' ' -f2- + ) + if [ -z "${CLI_BIN:-}" ] || [ ! -x "$CLI_BIN" ]; then + echo "cmux CLI binary with pty command not found in DerivedData" >&2 + exit 1 + fi + + CMUX_CLI_BIN="$CLI_BIN" ./tests/test_cmux_pty_cli_bridge.sh + + - name: Run cmux pty tmux parity regression + run: | + set -euo pipefail + + if ! command -v tmux >/dev/null 2>&1; then + if command -v brew >/dev/null 2>&1; then + brew install tmux + else + echo "tmux is required but Homebrew is unavailable" >&2 + exit 1 + fi + fi + + CLI_BIN="" + while IFS= read -r candidate; do + if "$candidate" help 2>/dev/null | grep -q '^ pty'; then + CLI_BIN="$candidate" + break + fi + done < <( + find "$HOME/Library/Developer/Xcode/DerivedData" -path "*/Build/Products/Debug/cmux" -exec stat -f '%m %N' {} \; \ + | sort -nr \ + | cut -d' ' -f2- + ) + if [ -z "${CLI_BIN:-}" ] || [ ! -x "$CLI_BIN" ]; then + echo "cmux CLI binary with pty command not found in DerivedData" >&2 + exit 1 + fi + + chmod +x ./tests/test_cmux_pty_tmux_parity.sh + CMUX_CLI_BIN="$CLI_BIN" ./tests/test_cmux_pty_tmux_parity.sh + tests-build-and-lag: # Build the full cmux scheme and run the lag regression on WarpBuild. 
# Keep lag validation separate from UI regressions so functional UI failures # and performance regressions stay isolated. Broader interactive UI suites # still run via test-e2e.yml on GitHub-hosted runners. runs-on: warp-macos-15-arm64-6x + env: + CMUX_SKIP_ZIG_BUILD: "1" timeout-minutes: 20 steps: - name: Checkout @@ -294,6 +481,7 @@ jobs: ./scripts/download-prebuilt-ghosttykit.sh - name: Install zig + if: env.CMUX_SKIP_ZIG_BUILD != '1' run: | ZIG_REQUIRED="0.15.2" if command -v zig >/dev/null 2>&1 && zig version 2>/dev/null | grep -q "^${ZIG_REQUIRED}"; then @@ -418,6 +606,8 @@ jobs: ui-regressions: runs-on: warp-macos-15-arm64-6x + env: + CMUX_SKIP_ZIG_BUILD: "1" timeout-minutes: 25 steps: - name: Checkout @@ -448,6 +638,7 @@ jobs: run: ./scripts/download-prebuilt-ghosttykit.sh - name: Install zig + if: env.CMUX_SKIP_ZIG_BUILD != '1' run: | ZIG_REQUIRED="0.15.2" if command -v zig >/dev/null 2>&1 && zig version 2>/dev/null | grep -q "^${ZIG_REQUIRED}"; then diff --git a/CLI/cmux.swift b/CLI/cmux.swift index a0120002d..7db32fd84 100644 --- a/CLI/cmux.swift +++ b/CLI/cmux.swift @@ -1680,6 +1680,9 @@ struct CMUXCLI { _ = try client.sendV2(method: "surface.send_text", params: sendParams) } + case "pty": + try runPty(commandArgs: commandArgs, client: client, windowOverride: windowId, jsonOutput: jsonOutput) + case "new-split": let (wsArg, rem0) = parseOption(commandArgs, name: "--workspace") let (panelArg, rem1) = parseOption(rem0, name: "--panel") @@ -2774,6 +2777,349 @@ struct CMUXCLI { return nil } + private func uint64FromAny(_ value: Any?) -> UInt64? { + if let value = value as? UInt64 { return value } + if let value = value as? Int { return value >= 0 ? UInt64(value) : nil } + if let value = value as? NSNumber { return value.uint64Value } + if let value = value as? 
String { return UInt64(value) } + return nil + } + + private struct PTYSurfaceDaemonInfo { + let socketPath: String + let sessionID: String + let workspaceID: String + let surfaceID: String + + init(payload: [String: Any]) throws { + guard let socketPath = payload["socket_path"] as? String, + !socketPath.trimmingCharacters(in: .whitespacesAndNewlines).isEmpty, + let sessionID = payload["session_id"] as? String, + !sessionID.trimmingCharacters(in: .whitespacesAndNewlines).isEmpty, + let workspaceID = payload["workspace_id"] as? String, + let surfaceID = payload["surface_id"] as? String else { + throw CLIError(message: "surface.daemon_info returned an incomplete daemon payload") + } + self.socketPath = socketPath + self.sessionID = sessionID + self.workspaceID = workspaceID + self.surfaceID = surfaceID + } + } + + private struct PTYWindowSize: Equatable { + let cols: Int + let rows: Int + } + + private struct PTYTerminalReadResult { + let offset: UInt64 + let eof: Bool + let data: Data + } + + private enum PTYDaemonRPCError: LocalizedError { + case invalidResponse(String) + case rpc(code: String, message: String) + + var errorDescription: String? 
{ + switch self { + case .invalidResponse(let message): + return message + case .rpc(_, let message): + return message + } + } + } + + private struct PTYDaemonClient { + let socketPath: String + + func sessionAttach(sessionID: String, attachmentID: String, size: PTYWindowSize) throws { + _ = try call( + method: "session.attach", + params: [ + "session_id": sessionID, + "attachment_id": attachmentID, + "cols": max(1, size.cols), + "rows": max(1, size.rows), + ] + ) + } + + func sessionResize(sessionID: String, attachmentID: String, size: PTYWindowSize) throws { + _ = try call( + method: "session.resize", + params: [ + "session_id": sessionID, + "attachment_id": attachmentID, + "cols": max(1, size.cols), + "rows": max(1, size.rows), + ] + ) + } + + func sessionDetach(sessionID: String, attachmentID: String) throws { + _ = try call( + method: "session.detach", + params: [ + "session_id": sessionID, + "attachment_id": attachmentID, + ] + ) + } + + func terminalWrite(sessionID: String, data: Data) throws { + _ = try call( + method: "terminal.write", + params: [ + "session_id": sessionID, + "data": data.base64EncodedString(), + ] + ) + } + + func terminalRead( + sessionID: String, + offset: UInt64, + maxBytes: Int = 64 * 1024, + timeoutMilliseconds: Int = 250 + ) throws -> PTYTerminalReadResult { + let result = try call( + method: "terminal.read", + params: [ + "session_id": sessionID, + "offset": offset, + "max_bytes": maxBytes, + "timeout_ms": timeoutMilliseconds, + ] + ) + guard let nextOffset = Self.uint64FromAny(result["offset"]), + let eof = result["eof"] as? Bool, + let encoded = result["data"] as? 
String, + let data = Data(base64Encoded: encoded) else { + throw PTYDaemonRPCError.invalidResponse("terminal.read returned malformed output") + } + return PTYTerminalReadResult(offset: nextOffset, eof: eof, data: data) + } + + private func call(method: String, params: [String: Any]) throws -> [String: Any] { + let requestData = try JSONSerialization.data( + withJSONObject: [ + "id": 1, + "method": method, + "params": params, + ], + options: [] + ) + Data([0x0A]) + let responseData = try roundTripUnixSocket(socketPath: socketPath, request: requestData) + guard let responseLine = String(data: responseData, encoding: .utf8)? + .trimmingCharacters(in: .whitespacesAndNewlines), + !responseLine.isEmpty, + let lineData = responseLine.data(using: .utf8), + let envelope = try JSONSerialization.jsonObject(with: lineData) as? [String: Any] else { + throw PTYDaemonRPCError.invalidResponse("daemon returned invalid JSON") + } + + if let ok = envelope["ok"] as? Bool, ok == true { + guard let result = envelope["result"] as? [String: Any] else { + throw PTYDaemonRPCError.invalidResponse("daemon response was missing a result payload") + } + return result + } + + let errorPayload = envelope["error"] as? [String: Any] + let code = (errorPayload?["code"] as? String) ?? "unknown" + let message = (errorPayload?["message"] as? String) ?? 
"daemon request failed" + throw PTYDaemonRPCError.rpc(code: code, message: message) + } + + private func roundTripUnixSocket(socketPath: String, request: Data) throws -> Data { + let fd = socket(AF_UNIX, SOCK_STREAM, 0) + guard fd >= 0 else { + throw PTYDaemonRPCError.invalidResponse("failed to create daemon socket") + } + defer { Darwin.close(fd) } + + var timeout = timeval(tv_sec: 15, tv_usec: 0) + withUnsafePointer(to: &timeout) { pointer in + _ = setsockopt(fd, SOL_SOCKET, SO_RCVTIMEO, pointer, socklen_t(MemoryLayout.size)) + _ = setsockopt(fd, SOL_SOCKET, SO_SNDTIMEO, pointer, socklen_t(MemoryLayout.size)) + } + + var address = sockaddr_un() + address.sun_family = sa_family_t(AF_UNIX) + let pathBytes = Array(socketPath.utf8CString) + guard pathBytes.count <= MemoryLayout.size(ofValue: address.sun_path) else { + throw PTYDaemonRPCError.invalidResponse("daemon socket path is too long") + } + let sunPathOffset = MemoryLayout.offset(of: \.sun_path) ?? 0 + withUnsafeMutableBytes(of: &address) { rawBuffer in + let destination = rawBuffer.baseAddress!.advanced(by: sunPathOffset) + pathBytes.withUnsafeBytes { pathBuffer in + destination.copyMemory(from: pathBuffer.baseAddress!, byteCount: pathBytes.count) + } + } + + let addressLength = socklen_t(MemoryLayout.size(ofValue: address.sun_family) + pathBytes.count) + let connectResult = withUnsafePointer(to: &address) { + $0.withMemoryRebound(to: sockaddr.self, capacity: 1) { + Darwin.connect(fd, $0, addressLength) + } + } + guard connectResult == 0 else { + throw PTYDaemonRPCError.invalidResponse("failed to connect to daemon socket") + } + + try request.withUnsafeBytes { rawBuffer in + guard let baseAddress = rawBuffer.bindMemory(to: UInt8.self).baseAddress else { return } + var bytesRemaining = rawBuffer.count + var pointer = baseAddress + while bytesRemaining > 0 { + let written = Darwin.write(fd, pointer, bytesRemaining) + if written <= 0 { + throw PTYDaemonRPCError.invalidResponse("failed to write daemon request") + 
} + bytesRemaining -= written + pointer = pointer.advanced(by: written) + } + } + _ = shutdown(fd, SHUT_WR) + + var response = Data() + var scratch = [UInt8](repeating: 0, count: 4096) + while true { + let count = Darwin.read(fd, &scratch, scratch.count) + if count > 0 { + response.append(scratch, count: count) + continue + } + if count == 0 { + break + } + if errno == EAGAIN || errno == EWOULDBLOCK { + throw PTYDaemonRPCError.invalidResponse("timed out waiting for daemon response") + } + throw PTYDaemonRPCError.invalidResponse("failed to read daemon response") + } + return response + } + + private static func uint64FromAny(_ value: Any?) -> UInt64? { + switch value { + case let value as UInt64: + return value + case let value as Int: + return value >= 0 ? UInt64(value) : nil + case let value as NSNumber: + return value.uint64Value + case let value as String: + return UInt64(value) + default: + return nil + } + } + } + + private final class PTYRawModeGuard { + private let fd: Int32 + private let original: termios + private var restored = false + + init(fd: Int32 = STDIN_FILENO) throws { + self.fd = fd + var original = termios() + guard tcgetattr(fd, &original) == 0 else { + throw CLIError(message: "Failed to read terminal attributes: \(String(cString: strerror(errno)))") + } + var raw = original + cfmakeraw(&raw) + guard tcsetattr(fd, TCSANOW, &raw) == 0 else { + throw CLIError(message: "Failed to enable raw terminal mode: \(String(cString: strerror(errno)))") + } + self.original = original + } + + func restore() { + guard !restored else { return } + restored = true + var original = self.original + _ = tcsetattr(fd, TCSANOW, &original) + } + + deinit { + restore() + } + } + + private final class PTYInputPump { + private let queue = DispatchQueue(label: "cmux.pty.stdin", qos: .userInitiated) + private let stateLock = NSLock() + private let writeHandler: (Data) throws -> Void + private var running = true + + init(writeHandler: @escaping (Data) throws -> Void) { + 
self.writeHandler = writeHandler + } + + func start() { + queue.async { [weak self] in + self?.run() + } + } + + func stop() { + stateLock.lock() + running = false + stateLock.unlock() + } + + private func isRunning() -> Bool { + stateLock.lock() + let current = running + stateLock.unlock() + return current + } + + private func run() { + var pollDescriptor = pollfd(fd: STDIN_FILENO, events: Int16(POLLIN), revents: 0) + var buffer = [UInt8](repeating: 0, count: 8192) + + while isRunning() { + let pollResult = Darwin.poll(&pollDescriptor, 1, 200) + if pollResult < 0 { + if errno == EINTR { + continue + } + return + } + if pollResult == 0 || (pollDescriptor.revents & Int16(POLLIN)) == 0 { + continue + } + + let readCount = Darwin.read(STDIN_FILENO, &buffer, buffer.count) + if readCount > 0 { + do { + try writeHandler(Data(buffer.prefix(readCount))) + } catch { + stop() + return + } + continue + } + if readCount == 0 { + stop() + return + } + if errno == EINTR { + continue + } + stop() + return + } + } + } + private func parseBoolString(_ raw: String) -> Bool? { switch raw.lowercased() { case "1", "true", "yes", "on": @@ -2961,6 +3307,151 @@ struct CMUXCLI { ) } + private func runPty( + commandArgs: [String], + client: SocketClient, + windowOverride: String?, + jsonOutput: Bool + ) throws { + guard !jsonOutput else { + throw CLIError(message: "cmux pty is interactive only and does not support --json") + } + guard isatty(STDIN_FILENO) == 1, isatty(STDOUT_FILENO) == 1 else { + throw CLIError(message: "cmux pty requires a TTY on stdin and stdout") + } + + let workspaceArg = workspaceFromArgsOrEnv(commandArgs, windowOverride: windowOverride) + let explicitSurfaceArg = optionValue(commandArgs, name: "--surface") ?? 
optionValue(commandArgs, name: "--panel") + let workspaceHandle = try normalizeWorkspaceHandle( + workspaceArg, + client: client, + allowCurrent: workspaceArg == nil + ) + let surfaceHandle: String = try { + if let explicitSurfaceArg { + guard let resolved = try normalizeSurfaceHandle( + explicitSurfaceArg, + client: client, + workspaceHandle: workspaceHandle, + allowFocused: false + ) else { + throw CLIError(message: "Unable to resolve surface handle") + } + return resolved + } + var params: [String: Any] = [:] + if let workspaceHandle { + params["workspace_id"] = workspaceHandle + } + let currentPayload = try client.sendV2(method: "surface.current", params: params) + if let resolved = (currentPayload["surface_ref"] as? String) ?? (currentPayload["surface_id"] as? String) { + return resolved + } + throw CLIError(message: "No focused terminal surface") + }() + + var daemonInfoParams: [String: Any] = ["surface_id": surfaceHandle] + if let workspaceHandle { + daemonInfoParams["workspace_id"] = workspaceHandle + } + let daemonInfoPayload = try client.sendV2(method: "surface.daemon_info", params: daemonInfoParams) + let daemonInfo = try PTYSurfaceDaemonInfo(payload: daemonInfoPayload) + let daemonClient = PTYDaemonClient(socketPath: daemonInfo.socketPath) + let attachmentID = UUID().uuidString + let initialSize = currentPTYWindowSize() + + try daemonClient.sessionAttach( + sessionID: daemonInfo.sessionID, + attachmentID: attachmentID, + size: initialSize + ) + + let rawMode = try PTYRawModeGuard() + defer { + rawMode.restore() + } + + signal(SIGWINCH, SIG_IGN) + let resizeSource = DispatchSource.makeSignalSource(signal: SIGWINCH, queue: DispatchQueue.global(qos: .userInitiated)) + resizeSource.setEventHandler { + let nextSize = self.currentPTYWindowSize() + try? daemonClient.sessionResize( + sessionID: daemonInfo.sessionID, + attachmentID: attachmentID, + size: nextSize + ) + } + resizeSource.resume() + defer { + resizeSource.cancel() + try? 
daemonClient.sessionDetach(sessionID: daemonInfo.sessionID, attachmentID: attachmentID) + } + + let inputPump = PTYInputPump { data in + try daemonClient.terminalWrite(sessionID: daemonInfo.sessionID, data: data) + } + inputPump.start() + defer { + inputPump.stop() + } + + var nextOffset: UInt64 = 0 + while true { + do { + let readResult = try daemonClient.terminalRead( + sessionID: daemonInfo.sessionID, + offset: nextOffset + ) + nextOffset = readResult.offset + if !readResult.data.isEmpty { + try writePTYOutput(readResult.data) + } + if readResult.eof { + break + } + } catch let error as PTYDaemonRPCError { + if case .rpc(let code, _) = error, code == "deadline_exceeded" { + continue + } + if case .rpc(let code, _) = error, code == "not_found" { + break + } + throw CLIError(message: error.localizedDescription) + } catch { + throw CLIError(message: error.localizedDescription) + } + } + } + + private func currentPTYWindowSize(fd: Int32 = STDOUT_FILENO) -> PTYWindowSize { + var windowSize = winsize() + if ioctl(fd, TIOCGWINSZ, &windowSize) == 0 { + let cols = max(Int(windowSize.ws_col), 1) + let rows = max(Int(windowSize.ws_row), 1) + return PTYWindowSize(cols: cols, rows: rows) + } + return PTYWindowSize(cols: 80, rows: 24) + } + + private func writePTYOutput(_ data: Data) throws { + try data.withUnsafeBytes { rawBuffer in + guard let baseAddress = rawBuffer.bindMemory(to: UInt8.self).baseAddress else { return } + var bytesRemaining = rawBuffer.count + var pointer = baseAddress + while bytesRemaining > 0 { + let written = Darwin.write(STDOUT_FILENO, pointer, bytesRemaining) + if written < 0 { + if errno == EINTR { + continue + } + throw CLIError(message: "Failed to write PTY output: \(String(cString: strerror(errno)))") + } + bytesRemaining -= written + pointer = pointer.advanced(by: written) + } + } + } + private func displayTabHandle(_ raw: String?) -> String? 
{ guard let raw else { return nil } let trimmed = raw.trimmingCharacters(in: .whitespacesAndNewlines) @@ -6302,6 +6793,21 @@ struct CMUXCLI { cmux new-workspace --cwd ~/projects/myapp cmux new-workspace --cwd . --command "npm test" """ + case "pty": + return """ + Usage: cmux pty [--workspace ] [--surface ] [--panel ] + + Attach the current terminal to the Rust-backed PTY session for a local cmux surface. + + Flags: + --workspace Workspace context (default: current workspace) + --surface Surface to mirror (default: current focused surface) + --panel Alias for --surface + + Example: + cmux pty + cmux pty --workspace workspace:2 --surface surface:3 + """ case "list-workspaces": return """ Usage: cmux list-workspaces @@ -11378,6 +11884,7 @@ struct CMUXCLI { workspace-action --action [--workspace ] [--title ] [--color <#hex|name>] list-workspaces new-workspace [--cwd ] [--command ] + pty [--workspace ] [--surface ] [--panel ] ssh [--name ] [--port <n>] [--identity <path>] [--ssh-option <opt>] [-- <remote-command-args>] remote-daemon-status [--os <darwin|linux>] [--arch <arm64|amd64>] new-split <left|right|up|down> [--workspace <id|ref>] [--surface <id|ref>] [--panel <id|ref>] diff --git a/GhosttyTabs.xcodeproj/project.pbxproj b/GhosttyTabs.xcodeproj/project.pbxproj index cf3cd96b9..cf20cdadd 100644 --- a/GhosttyTabs.xcodeproj/project.pbxproj +++ b/GhosttyTabs.xcodeproj/project.pbxproj @@ -331,7 +331,7 @@ ); runOnlyForDeploymentPostprocessing = 0; shellPath = /bin/sh; - shellScript = "set -euo 
pipefail\nDEST=\"${TARGET_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}\"\nGHOSTTY_DEST=\"${DEST}/ghostty\"\nTERMINFO_DEST=\"${DEST}/terminfo\"\nCMUX_SHELL_DEST=\"${DEST}/shell-integration\"\nSRC_SHARE=\"${SRCROOT}/ghostty/zig-out/share\"\nGHOSTTY_SRC=\"${SRC_SHARE}/ghostty\"\nTERMINFO_SRC=\"${SRC_SHARE}/terminfo\"\nFALLBACK_GHOSTTY=\"${SRCROOT}/Resources/ghostty\"\nFALLBACK_TERMINFO=\"${SRCROOT}/Resources/ghostty/terminfo\"\nTERMINFO_OVERLAY=\"${SRCROOT}/Resources/terminfo-overlay\"\nCMUX_SHELL_SRC=\"${SRCROOT}/Resources/shell-integration\"\nif [ -d \"$GHOSTTY_SRC\" ]; then\n mkdir -p \"$GHOSTTY_DEST\"\n rsync -a --delete \"$GHOSTTY_SRC/\" \"$GHOSTTY_DEST/\"\nelif [ -d \"$FALLBACK_GHOSTTY\" ]; then\n mkdir -p \"$GHOSTTY_DEST\"\n rsync -a --delete \"$FALLBACK_GHOSTTY/\" \"$GHOSTTY_DEST/\"\nfi\nif [ -d \"$TERMINFO_SRC\" ]; then\n mkdir -p \"$TERMINFO_DEST\"\n rsync -a --delete \"$TERMINFO_SRC/\" \"$TERMINFO_DEST/\"\nelif [ -d \"$FALLBACK_TERMINFO\" ]; then\n mkdir -p \"$TERMINFO_DEST\"\n rsync -a --delete \"$FALLBACK_TERMINFO/\" \"$TERMINFO_DEST/\"\nfi\n# Overlay any cmux-specific terminfo adjustments.\n# This intentionally does not use --delete so we only patch specific entries.\nif [ -d \"$TERMINFO_OVERLAY\" ]; then\n mkdir -p \"$TERMINFO_DEST\"\n rsync -a \"$TERMINFO_OVERLAY/\" \"$TERMINFO_DEST/\"\nfi\nif [ -d \"$CMUX_SHELL_SRC\" ]; then\n mkdir -p \"$CMUX_SHELL_DEST\"\n # Use '/.' 
so dotfiles like .zshenv/.zprofile are copied too.\n rsync -a \"$CMUX_SHELL_SRC/.\" \"$CMUX_SHELL_DEST/\"\nfi\nINFO_PLIST=\"${TARGET_BUILD_DIR}/${INFOPLIST_PATH}\"\nCOMMIT=\"$(git -C \"${SRCROOT}\" rev-parse --short=9 HEAD 2>/dev/null || true)\"\nif [ -n \"$COMMIT\" ] && [ -f \"$INFO_PLIST\" ]; then\n /usr/libexec/PlistBuddy -c \"Set :CMUXCommit $COMMIT\" \"$INFO_PLIST\" >/dev/null 2>&1 || /usr/libexec/PlistBuddy -c \"Add :CMUXCommit string $COMMIT\" \"$INFO_PLIST\" >/dev/null 2>&1 || true\nfi\n"; + shellScript = "set -euo pipefail\nDEST=\"${TARGET_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}\"\nGHOSTTY_DEST=\"${DEST}/ghostty\"\nTERMINFO_DEST=\"${DEST}/terminfo\"\nCMUX_SHELL_DEST=\"${DEST}/shell-integration\"\nBIN_DEST=\"${DEST}/bin\"\nSRC_SHARE=\"${SRCROOT}/ghostty/zig-out/share\"\nGHOSTTY_SRC=\"${SRC_SHARE}/ghostty\"\nTERMINFO_SRC=\"${SRC_SHARE}/terminfo\"\nFALLBACK_GHOSTTY=\"${SRCROOT}/Resources/ghostty\"\nFALLBACK_TERMINFO=\"${SRCROOT}/Resources/ghostty/terminfo\"\nTERMINFO_OVERLAY=\"${SRCROOT}/Resources/terminfo-overlay\"\nCMUX_SHELL_SRC=\"${SRCROOT}/Resources/shell-integration\"\nif [ -d \"$GHOSTTY_SRC\" ]; then\n mkdir -p \"$GHOSTTY_DEST\"\n rsync -a --delete \"$GHOSTTY_SRC/\" \"$GHOSTTY_DEST/\"\nelif [ -d \"$FALLBACK_GHOSTTY\" ]; then\n mkdir -p \"$GHOSTTY_DEST\"\n rsync -a --delete \"$FALLBACK_GHOSTTY/\" \"$GHOSTTY_DEST/\"\nfi\nif [ -d \"$TERMINFO_SRC\" ]; then\n mkdir -p \"$TERMINFO_DEST\"\n rsync -a --delete \"$TERMINFO_SRC/\" \"$TERMINFO_DEST/\"\nelif [ -d \"$FALLBACK_TERMINFO\" ]; then\n mkdir -p \"$TERMINFO_DEST\"\n rsync -a --delete \"$FALLBACK_TERMINFO/\" \"$TERMINFO_DEST/\"\nfi\n# Overlay any cmux-specific terminfo adjustments.\n# This intentionally does not use --delete so we only patch specific entries.\nif [ -d \"$TERMINFO_OVERLAY\" ]; then\n mkdir -p \"$TERMINFO_DEST\"\n rsync -a \"$TERMINFO_OVERLAY/\" \"$TERMINFO_DEST/\"\nfi\nif [ -d \"$CMUX_SHELL_SRC\" ]; then\n mkdir -p \"$CMUX_SHELL_DEST\"\n # Use '/.' 
so dotfiles like .zshenv/.zprofile are copied too.\n rsync -a \"$CMUX_SHELL_SRC/.\" \"$CMUX_SHELL_DEST/\"\nfi\nif [ -d \"${SRCROOT}/ghostty\" ]; then\n mkdir -p \"$BIN_DEST\"\n case \"$(/usr/bin/uname -m)\" in\n arm64)\n GHOSTTY_HELPER_TARGET=\"aarch64-macos\"\n ;;\n x86_64)\n GHOSTTY_HELPER_TARGET=\"x86_64-macos\"\n ;;\n *)\n echo \"error: unsupported host architecture for Ghostty helper: $(/usr/bin/uname -m)\" >&2\n exit 1\n ;;\n esac\n \"${SRCROOT}/scripts/build-ghostty-cli-helper.sh\" \\\n --target \"$GHOSTTY_HELPER_TARGET\" \\\n --output \"$BIN_DEST/ghostty\"\nfi\nINFO_PLIST=\"${TARGET_BUILD_DIR}/${INFOPLIST_PATH}\"\nCOMMIT=\"$(git -C \"${SRCROOT}\" rev-parse --short=9 HEAD 2>/dev/null || true)\"\nif [ -n \"$COMMIT\" ] && [ -f \"$INFO_PLIST\" ]; then\n /usr/libexec/PlistBuddy -c \"Set :CMUXCommit $COMMIT\" \"$INFO_PLIST\" >/dev/null 2>&1 || /usr/libexec/PlistBuddy -c \"Add :CMUXCommit string $COMMIT\" \"$INFO_PLIST\" >/dev/null 2>&1 || true\nfi\n"; }; /* End PBXShellScriptBuildPhase section */ diff --git a/Sources/GhosttyTerminalView.swift b/Sources/GhosttyTerminalView.swift index 5fe127f32..e294f7914 100644 --- a/Sources/GhosttyTerminalView.swift +++ b/Sources/GhosttyTerminalView.swift @@ -10,6 +10,14 @@ import Bonsplit import IOSurface import UniformTypeIdentifiers +// Ghostty still exports these embedded-surface helpers, but the current +// generated public header in this branch no longer declares them. +@_silgen_name("ghostty_surface_clear_selection") +private func cmuxGhosttySurfaceClearSelection(_ surface: ghostty_surface_t) -> Bool + +@_silgen_name("ghostty_surface_select_cursor_cell") +private func cmuxGhosttySurfaceSelectCursorCell(_ surface: ghostty_surface_t) -> Bool + #if os(macOS) func cmuxShouldUseTransparentBackgroundWindow() -> Bool { let defaults = UserDefaults.standard @@ -1868,10 +1876,10 @@ class GhosttyApp { private func bellAudioPath() -> String? 
{ guard let config else { return nil } - var value = ghostty_config_path_s() + var value: UnsafePointer<Int8>? let key = "bell-audio-path" guard ghostty_config_get(config, &value, key, UInt(key.lengthOfBytes(using: .utf8))), - let rawPath = value.path else { + let rawPath = value else { return nil } let path = String(cString: rawPath) @@ -2714,6 +2722,8 @@ final class TerminalSurface: Identifiable, ObservableObject { private let configTemplate: ghostty_surface_config_s? private let workingDirectory: String? private var initialCommand: String? + private var localDaemonBootstrap: LocalTerminalDaemonSurfaceBootstrap? + private var localDaemonSessionController: LocalTerminalDaemonSessionController? private let initialEnvironmentOverrides: [String: String] var requestedWorkingDirectory: String? { workingDirectory } private var additionalEnvironment: [String: String] @@ -2824,6 +2834,15 @@ final class TerminalSurface: Identifiable, ObservableObject { TerminalSurfaceRegistry.shared.register(self) } + func configureLocalDaemonBootstrap(_ bootstrap: LocalTerminalDaemonSurfaceBootstrap) { + localDaemonBootstrap = bootstrap + localDaemonSessionController = LocalTerminalDaemonSessionController(bootstrap: bootstrap) { [weak self] event in + DispatchQueue.main.async { + self?.handleLocalDaemonEvent(event) + } + } + } + func updateWorkspaceId(_ newTabId: UUID) { tabId = newTabId @@ -3059,6 +3078,79 @@ final class TerminalSurface: Identifiable, ObservableObject { #endif } + func localDaemonInfoPayload() -> [String: Any]? 
{ + guard let localDaemonBootstrap else { return nil } + return [ + "backend": "rust_local_daemon", + "socket_path": localDaemonBootstrap.configuration.socketPath, + "daemon_binary_path": localDaemonBootstrap.configuration.daemonBinaryPath, + "session_id": localDaemonBootstrap.sessionID, + ] + } + + func processOutput(_ data: Data) { + guard let surface = surface, !data.isEmpty else { return } + data.withUnsafeBytes { buffer in + guard let baseAddress = buffer.baseAddress else { return } + let pointer = baseAddress.assumingMemoryBound(to: CChar.self) + ghostty_surface_process_output(surface, pointer, UInt(buffer.count)) + } + } + + private func currentGridSize() -> LocalTerminalDaemonGridSize? { + guard let surface = surface else { return nil } + let size = ghostty_surface_size(surface) + let columns = max(Int(size.columns), 1) + let rows = max(Int(size.rows), 1) + guard columns > 0, rows > 0 else { return nil } + return LocalTerminalDaemonGridSize(columns: columns, rows: rows) + } + + private func startLocalDaemonSessionIfNeeded() { + guard let localDaemonSessionController, + let gridSize = currentGridSize() else { + return + } + localDaemonSessionController.start(initialSize: gridSize) + } + + private func resizeLocalDaemonSessionIfNeeded() { + guard let localDaemonSessionController, + let gridSize = currentGridSize() else { + return + } + localDaemonSessionController.resize(gridSize) + } + + private func stopLocalDaemonSession(closeSession: Bool) { + localDaemonSessionController?.stop(closeSession: closeSession) + } + + private func handleLocalDaemonInput(_ data: Data) { + localDaemonSessionController?.send(data) + } + + private func handleLocalDaemonEvent(_ event: LocalTerminalDaemonControllerEvent) { + switch event { + case .output(let data): + processOutput(data) + case .failed(let message): + NSLog("local daemon session failed for surface %@: %@", id.uuidString, message) + case .exited: + let workspaceID = tabId + let surfaceID = id + Task { @MainActor in + 
guard let app = AppDelegate.shared, + let manager = app.tabManagerFor(tabId: workspaceID) ?? app.tabManager, + let workspace = manager.tabs.first(where: { $0.id == workspaceID }), + workspace.panels[surfaceID] != nil else { + return + } + manager.closePanelAfterChildExited(tabId: workspaceID, surfaceId: surfaceID) + } + } + } + func isAttached(to view: GhosttyNSView) -> Bool { attachedView === view && surface != nil } @@ -3302,6 +3394,7 @@ final class TerminalSurface: Identifiable, ObservableObject { /// before deinit; deinit will skip the free if already torn down. @MainActor func teardownSurface() { + stopLocalDaemonSession(closeSession: true) recordTeardownRequest(reason: "surface.teardown") markPortalLifecycleClosed(reason: "teardown") @@ -3495,6 +3588,24 @@ final class TerminalSurface: Identifiable, ObservableObject { surfaceCallbackContext = callbackContext surfaceConfig.scale_factor = scaleFactors.layer surfaceConfig.context = surfaceContext + if localDaemonSessionController != nil { + surfaceConfig.io_mode = GHOSTTY_SURFACE_IO_MANUAL + surfaceConfig.io_write_cb = { userdata, data, len in + guard let userdata, let data, len > 0 else { return } + let callbackContext = Unmanaged<GhosttySurfaceCallbackContext> + .fromOpaque(userdata) + .takeUnretainedValue() + let outboundBytes = Data(bytes: data, count: Int(len)) + if Thread.isMainThread { + callbackContext.terminalSurface?.handleLocalDaemonInput(outboundBytes) + } else { + DispatchQueue.main.async { + callbackContext.terminalSurface?.handleLocalDaemonInput(outboundBytes) + } + } + } + surfaceConfig.io_write_userdata = callbackContext.toOpaque() + } #if DEBUG let templateFontText = String(format: "%.2f", surfaceConfig.font_size) dlog( @@ -3558,6 +3669,10 @@ final class TerminalSurface: Identifiable, ObservableObject { } let createWithCommandAndWorkingDirectory = { [self] in + if localDaemonSessionController != nil { + createSurface() + return + } if let initialCommand, !initialCommand.isEmpty { 
initialCommand.withCString { cCommand in surfaceConfig.command = cCommand @@ -3633,6 +3748,7 @@ final class TerminalSurface: Identifiable, ObservableObject { lastXScale = scaleFactors.x lastYScale = scaleFactors.y } + startLocalDaemonSessionIfNeeded() // Some GhosttyKit builds can drop inherited font_size during post-create // config/scale reconciliation. If runtime points don't match the inherited @@ -3730,6 +3846,7 @@ final class TerminalSurface: Identifiable, ObservableObject { ghostty_surface_set_size(surface, wpx, hpx) lastPixelWidth = wpx lastPixelHeight = hpx + resizeLocalDaemonSessionIfNeeded() } // Let Ghostty continue rendering on its own wakeups for steady-state frames. @@ -3964,6 +4081,7 @@ final class TerminalSurface: Identifiable, ObservableObject { #endif deinit { + stopLocalDaemonSession(closeSession: true) markPortalLifecycleClosed(reason: "deinit") let callbackContext = surfaceCallbackContext @@ -4679,7 +4797,7 @@ class GhosttyNSView: NSView, NSUserInterfaceValidations { guard surface != nil else { return false } setKeyboardCopyModeActive(!keyboardCopyModeActive) if !keyboardCopyModeActive, let surface { - _ = ghostty_surface_clear_selection(surface) + _ = cmuxGhosttySurfaceClearSelection(surface) } return true } @@ -4690,13 +4808,13 @@ class GhosttyNSView: NSView, NSUserInterfaceValidations { keyboardCopyModeActive = active if active, let surface { keyboardCopyModeViewportRow = keyboardCopyModeSelectionAnchor(surface: surface)?.row - _ = ghostty_surface_clear_selection(surface) + _ = cmuxGhosttySurfaceClearSelection(surface) if keyboardCopyModeViewportRow == nil { keyboardCopyModeViewportRow = keyboardCopyModeImeViewportRow(surface: surface) } // Create a 1-cell selection at the terminal cursor to serve as a // visible cursor indicator in copy mode. 
- _ = ghostty_surface_select_cursor_cell(surface) + _ = cmuxGhosttySurfaceSelectCursorCell(surface) } else { keyboardCopyModeViewportRow = nil } @@ -4733,7 +4851,7 @@ class GhosttyNSView: NSView, NSUserInterfaceValidations { private func keyboardCopyModeSelectionAnchor(surface: ghostty_surface_t) -> (row: Int, y: Double)? { let size = ghostty_surface_size(surface) guard size.rows > 0, size.columns > 0 else { return nil } - guard ghostty_surface_select_cursor_cell(surface) else { return nil } + guard cmuxGhosttySurfaceSelectCursorCell(surface) else { return nil } var text = ghostty_text_s() guard ghostty_surface_read_selection(surface, &text) else { return nil } @@ -4754,7 +4872,7 @@ class GhosttyNSView: NSView, NSUserInterfaceValidations { guard let anchor = keyboardCopyModeSelectionAnchor(surface: surface) else { return } keyboardCopyModeViewportRow = anchor.row // Preserve the visible cursor indicator. - _ = ghostty_surface_select_cursor_cell(surface) + _ = cmuxGhosttySurfaceSelectCursorCell(surface) } private func copyCurrentViewportLinesToClipboard( @@ -4769,7 +4887,7 @@ class GhosttyNSView: NSView, NSUserInterfaceValidations { guard let anchor = keyboardCopyModeSelectionAnchor(surface: surface) else { return false } - _ = ghostty_surface_clear_selection(surface) + _ = cmuxGhosttySurfaceClearSelection(surface) var imeX: Double = 0 var imeY: Double = 0 @@ -4825,18 +4943,18 @@ class GhosttyNSView: NSView, NSUserInterfaceValidations { switch action { case .exit: - _ = ghostty_surface_clear_selection(surface) + _ = cmuxGhosttySurfaceClearSelection(surface) setKeyboardCopyModeActive(false) case .startSelection: keyboardCopyModeVisualActive = true case .clearSelection: keyboardCopyModeVisualActive = false - _ = ghostty_surface_clear_selection(surface) + _ = cmuxGhosttySurfaceClearSelection(surface) // Re-create 1-cell cursor at terminal cursor position. 
- _ = ghostty_surface_select_cursor_cell(surface) + _ = cmuxGhosttySurfaceSelectCursorCell(surface) case .copyAndExit: _ = performBindingAction("copy_to_clipboard") - _ = ghostty_surface_clear_selection(surface) + _ = cmuxGhosttySurfaceClearSelection(surface) setKeyboardCopyModeActive(false) case .copyLineAndExit: let startRow = currentKeyboardCopyModeViewportRow(surface: surface) @@ -4845,7 +4963,7 @@ class GhosttyNSView: NSView, NSUserInterfaceValidations { startRow: startRow, lineCount: count ) - _ = ghostty_surface_clear_selection(surface) + _ = cmuxGhosttySurfaceClearSelection(surface) setKeyboardCopyModeActive(false) case let .scrollLines(delta): _ = performBindingAction("scroll_page_lines:\(delta * count)") diff --git a/Sources/TerminalController.swift b/Sources/TerminalController.swift index c432b4296..d79d49c33 100644 --- a/Sources/TerminalController.swift +++ b/Sources/TerminalController.swift @@ -2101,6 +2101,8 @@ class TerminalController { return v2Result(id: id, self.v2SurfaceList(params: params)) case "surface.current": return v2Result(id: id, self.v2SurfaceCurrent(params: params)) + case "surface.daemon_info": + return v2Result(id: id, self.v2SurfaceDaemonInfo(params: params)) case "surface.focus": return v2Result(id: id, self.v2SurfaceFocus(params: params)) case "surface.split": @@ -2460,6 +2462,7 @@ class TerminalController { "feedback.submit", "surface.list", "surface.current", + "surface.daemon_info", "surface.focus", "surface.split", "surface.create", @@ -4562,6 +4565,49 @@ class TerminalController { return .ok(payload) } + private func v2SurfaceDaemonInfo(params: [String: Any]) -> V2CallResult { + guard let tabManager = v2ResolveTabManager(params: params) else { + return .err(code: "unavailable", message: "TabManager not available", data: nil) + } + + var result: V2CallResult = .err(code: "not_found", message: "Surface not found", data: nil) + v2MainSync { + guard let ws = v2ResolveWorkspace(params: params, tabManager: tabManager) else { + 
result = .err(code: "not_found", message: "Workspace not found", data: nil) + return + } + + let surfaceId = v2UUID(params, "surface_id") ?? ws.focusedPanelId + guard let surfaceId else { + result = .err(code: "not_found", message: "No focused surface", data: nil) + return + } + guard let terminalPanel = ws.panels[surfaceId] as? TerminalPanel else { + result = .err(code: "unsupported", message: "Surface is not a terminal", data: nil) + return + } + guard var payload = terminalPanel.surface.localDaemonInfoPayload() else { + result = .err( + code: "unsupported", + message: "Surface is not backed by the local Rust daemon", + data: ["surface_id": surfaceId.uuidString] + ) + return + } + + let paneId = ws.paneId(forPanelId: surfaceId)?.id + payload["workspace_id"] = ws.id.uuidString + payload["workspace_ref"] = v2Ref(kind: .workspace, uuid: ws.id) + payload["pane_id"] = v2OrNull(paneId?.uuidString) + payload["pane_ref"] = v2Ref(kind: .pane, uuid: paneId) + payload["surface_id"] = surfaceId.uuidString + payload["surface_ref"] = v2Ref(kind: .surface, uuid: surfaceId) + result = .ok(payload) + } + + return result + } + private func v2SurfaceFocus(params: [String: Any]) -> V2CallResult { guard let tabManager = v2ResolveTabManager(params: params) else { return .err(code: "unavailable", message: "TabManager not available", data: nil) diff --git a/Sources/Workspace.swift b/Sources/Workspace.swift index df0554f19..bb8a654d3 100644 --- a/Sources/Workspace.swift +++ b/Sources/Workspace.swift @@ -224,28 +224,6 @@ struct WorkspaceRemoteDaemonManifest: Decodable, Equatable { } } -private struct BonsplitCompatibilityTabIDPayload: Codable { - let id: UUID -} - -extension TabID { - var uuid: UUID { - if let id = Mirror(reflecting: self).children.first(where: { $0.label == "id" })?.value as? 
UUID { - return id - } - - let decoder = JSONDecoder() - let encoder = JSONEncoder() - - do { - let data = try encoder.encode(self) - return try decoder.decode(BonsplitCompatibilityTabIDPayload.self, from: data).id - } catch { - preconditionFailure("Failed to read Bonsplit TabID compatibility payload: \(error)") - } - } -} - extension Workspace { private static var compatibilityToggleZoomContextAction: TabContextAction? { TabContextAction(rawValue: "toggleZoom") @@ -316,7 +294,7 @@ extension Workspace { context: context, configTemplate: configTemplate, workingDirectory: workingDirectory, - initialCommand: startupCommandOverride ?? intendedInitialCommand, + initialCommand: startupCommandOverride, initialEnvironmentOverrides: initialEnvironmentOverrides, additionalEnvironment: additionalEnvironment ) @@ -331,7 +309,7 @@ extension Workspace { ) #endif if startupCommandOverride == nil, - let localDaemonStartupCommand = LocalTerminalDaemonBridge.startupCommand( + let localDaemonBootstrap = LocalTerminalDaemonBridge.bootstrapSession( sessionID: surface.id, workspaceID: workspaceId, portOrdinal: portOrdinal, @@ -342,17 +320,29 @@ extension Workspace { ) { #if DEBUG dlog( - "localDaemon.panel.command " + + "localDaemon.panel.bootstrap " + "surface=\(surface.id.uuidString.prefix(8)) " + "workspace=\(workspaceId.uuidString.prefix(8)) " + "applied=1" ) #endif - surface.setInitialCommand(localDaemonStartupCommand) + surface.configureLocalDaemonBootstrap(localDaemonBootstrap) + } else if startupCommandOverride == nil { +#if DEBUG + dlog( + "localDaemon.panel.bootstrap " + + "surface=\(surface.id.uuidString.prefix(8)) " + + "workspace=\(workspaceId.uuidString.prefix(8)) " + + "applied=0" + ) +#endif + if LocalTerminalDaemonBridge.requiresBootstrap() { + preconditionFailure("Local Rust daemon bootstrap is unavailable for surface \(surface.id.uuidString)") + } } else { #if DEBUG dlog( - "localDaemon.panel.command " + + "localDaemon.panel.bootstrap " + 
"surface=\(surface.id.uuidString.prefix(8)) " + "workspace=\(workspaceId.uuidString.prefix(8)) " + "applied=0" @@ -5495,14 +5485,22 @@ enum LocalTerminalDaemonBridge { if config == nil, let repoRoot = environment["CMUXTERM_REPO_ROOT"]?.trimmingCharacters(in: .whitespacesAndNewlines), !repoRoot.isEmpty { - let candidate = URL(fileURLWithPath: repoRoot, isDirectory: true) - .appendingPathComponent("daemon/remote/zig/zig-out/bin/cmuxd-remote", isDirectory: false) - .path - if fileManager.isExecutableFile(atPath: candidate) { - config = LocalTerminalDaemonConfiguration( - socketPath: rawSocketPath, - daemonBinaryPath: candidate - ) + let repoRootURL = URL(fileURLWithPath: repoRoot, isDirectory: true) + let relativePaths = [ + "daemon/remote/rust/target/debug/cmuxd-remote", + "daemon/remote/zig/zig-out/bin/cmuxd-remote", + ] + for relativePath in relativePaths { + let candidate = repoRootURL + .appendingPathComponent(relativePath, isDirectory: false) + .path + if fileManager.isExecutableFile(atPath: candidate) { + config = LocalTerminalDaemonConfiguration( + socketPath: rawSocketPath, + daemonBinaryPath: candidate + ) + break + } } } @@ -5525,6 +5523,12 @@ enum LocalTerminalDaemonBridge { return config } + static func requiresBootstrap( + environment: [String: String] = cmuxCurrentProcessEnvironment() + ) -> Bool { + !SessionRestorePolicy.isRunningUnderAutomatedTests(environment: environment) + } + private static func resolveWebSocketSecret(environment: [String: String]) -> String { if let explicit = environment["CMUX_MOBILE_WS_SECRET"]?.trimmingCharacters(in: .whitespacesAndNewlines), !explicit.isEmpty { @@ -5736,7 +5740,7 @@ enum LocalTerminalDaemonBridge { .appendingPathComponent("cmuxd-\(baseName).log", isDirectory: false) } - static func startupCommand( + static func bootstrapSession( sessionID: UUID, workspaceID: UUID, portOrdinal: Int, @@ -5747,7 +5751,7 @@ enum LocalTerminalDaemonBridge { environment: [String: String] = cmuxCurrentProcessEnvironment(), bundle: 
Bundle = .main, fileManager: FileManager = .default - ) -> String? { + ) -> LocalTerminalDaemonSurfaceBootstrap? { guard let configuration = ensureReachableConfiguration( environment: environment, bundle: bundle, @@ -5775,7 +5779,7 @@ enum LocalTerminalDaemonBridge { #if DEBUG dlog( - "localDaemon.startup " + + "localDaemon.bootstrap " + "socket=\(configuration.socketPath) " + "binary=\(configuration.daemonBinaryPath) " + "cmuxBundle=\(managedEnvironment["CMUX_BUNDLE_ID"] ?? "nil") " + @@ -5787,10 +5791,11 @@ enum LocalTerminalDaemonBridge { ) #endif - let startupScript = """ - exec \(shellSingleQuoted(configuration.daemonBinaryPath)) amux new \(shellSingleQuoted(sessionID.uuidString)) --quiet --socket \(shellSingleQuoted(configuration.socketPath)) -- \(shellSingleQuoted(daemonCommand)) - """ - return "sh -c \(shellSingleQuoted(startupScript))" + return LocalTerminalDaemonSurfaceBootstrap( + configuration: configuration, + sessionID: sessionID.uuidString, + command: daemonCommand + ) } private static func daemonSessionCommand( @@ -5841,6 +5846,483 @@ enum LocalTerminalDaemonBridge { } } +struct LocalTerminalDaemonSurfaceBootstrap { + let configuration: LocalTerminalDaemonConfiguration + let sessionID: String + let command: String +} + +struct LocalTerminalDaemonGridSize: Equatable { + let columns: Int + let rows: Int +} + +enum LocalTerminalDaemonControllerEvent { + case output(Data) + case exited + case failed(String) +} + +private enum LocalTerminalDaemonRPCError: LocalizedError { + case invalidResponse(String) + case rpc(code: String, message: String) + + var errorDescription: String? 
{ + switch self { + case .invalidResponse(let message): + return message + case .rpc(_, let message): + return message + } + } +} + +private struct LocalTerminalDaemonTerminalOpenResult { + let attachmentID: String + let offset: UInt64 +} + +private struct LocalTerminalDaemonTerminalReadResult { + let offset: UInt64 + let eof: Bool + let data: Data +} + +private struct LocalTerminalDaemonRPCClient { + let socketPath: String + + func terminalOpen( + sessionID: String, + command: String, + cols: Int, + rows: Int + ) throws -> LocalTerminalDaemonTerminalOpenResult { + let result = try call( + method: "terminal.open", + params: [ + "session_id": sessionID, + "command": command, + "cols": cols, + "rows": rows, + ] + ) + guard let attachmentID = result["attachment_id"] as? String, + let offset = uint64FromAny(result["offset"]) else { + throw LocalTerminalDaemonRPCError.invalidResponse("terminal.open did not return attachment state") + } + return LocalTerminalDaemonTerminalOpenResult(attachmentID: attachmentID, offset: offset) + } + + func terminalWrite(sessionID: String, data: Data) throws { + _ = try call( + method: "terminal.write", + params: [ + "session_id": sessionID, + "data": data.base64EncodedString(), + ] + ) + } + + func terminalRead( + sessionID: String, + offset: UInt64, + maxBytes: Int, + timeoutMilliseconds: Int + ) throws -> LocalTerminalDaemonTerminalReadResult { + let result = try call( + method: "terminal.read", + params: [ + "session_id": sessionID, + "offset": offset, + "max_bytes": maxBytes, + "timeout_ms": timeoutMilliseconds, + ] + ) + guard let nextOffset = uint64FromAny(result["offset"]), + let eof = result["eof"] as? Bool, + let encoded = result["data"] as? 
String, + let data = Data(base64Encoded: encoded) else { + throw LocalTerminalDaemonRPCError.invalidResponse("terminal.read returned malformed payload") + } + return LocalTerminalDaemonTerminalReadResult(offset: nextOffset, eof: eof, data: data) + } + + func sessionResize( + sessionID: String, + attachmentID: String, + size: LocalTerminalDaemonGridSize + ) throws { + _ = try call( + method: "session.resize", + params: [ + "session_id": sessionID, + "attachment_id": attachmentID, + "cols": size.columns, + "rows": size.rows, + ] + ) + } + + func sessionDetach(sessionID: String, attachmentID: String) throws { + _ = try call( + method: "session.detach", + params: [ + "session_id": sessionID, + "attachment_id": attachmentID, + ] + ) + } + + func sessionClose(sessionID: String) throws { + _ = try call( + method: "session.close", + params: ["session_id": sessionID] + ) + } + + private func call(method: String, params: [String: Any]) throws -> [String: Any] { + let requestData = try JSONSerialization.data( + withJSONObject: [ + "id": 1, + "method": method, + "params": params, + ], + options: [] + ) + Data([0x0A]) + let responseData = try Self.roundTripUnixSocket(socketPath: socketPath, request: requestData) + guard let responseLine = String(data: responseData, encoding: .utf8)? + .trimmingCharacters(in: .whitespacesAndNewlines), + !responseLine.isEmpty, + let lineData = responseLine.data(using: .utf8), + let envelope = try JSONSerialization.jsonObject(with: lineData) as? [String: Any] + else { + throw LocalTerminalDaemonRPCError.invalidResponse("daemon returned invalid JSON") + } + + if let ok = envelope["ok"] as? Bool, ok == true { + guard let result = envelope["result"] as? [String: Any] else { + throw LocalTerminalDaemonRPCError.invalidResponse("daemon response was missing result payload") + } + return result + } + + let errorPayload = envelope["error"] as? [String: Any] + let code = (errorPayload?["code"] as? String) ?? 
"unknown" + let message = (errorPayload?["message"] as? String) ?? "daemon request failed" + throw LocalTerminalDaemonRPCError.rpc(code: code, message: message) + } + + private static func roundTripUnixSocket(socketPath: String, request: Data) throws -> Data { + let fd = socket(AF_UNIX, SOCK_STREAM, 0) + guard fd >= 0 else { + throw LocalTerminalDaemonRPCError.invalidResponse("failed to create local daemon socket") + } + defer { Darwin.close(fd) } + + var timeout = timeval(tv_sec: 15, tv_usec: 0) + withUnsafePointer(to: &timeout) { pointer in + _ = setsockopt(fd, SOL_SOCKET, SO_RCVTIMEO, pointer, socklen_t(MemoryLayout<timeval>.size)) + _ = setsockopt(fd, SOL_SOCKET, SO_SNDTIMEO, pointer, socklen_t(MemoryLayout<timeval>.size)) + } + + var address = sockaddr_un() + address.sun_family = sa_family_t(AF_UNIX) + let pathBytes = Array(socketPath.utf8CString) + guard pathBytes.count <= MemoryLayout.size(ofValue: address.sun_path) else { + throw LocalTerminalDaemonRPCError.invalidResponse("daemon socket path is too long") + } + let sunPathOffset = MemoryLayout<sockaddr_un>.offset(of: \.sun_path) ?? 
0 + withUnsafeMutableBytes(of: &address) { rawBuffer in + let destination = rawBuffer.baseAddress!.advanced(by: sunPathOffset) + pathBytes.withUnsafeBytes { pathBuffer in + destination.copyMemory(from: pathBuffer.baseAddress!, byteCount: pathBytes.count) + } + } + + let addressLength = socklen_t(MemoryLayout.size(ofValue: address.sun_family) + pathBytes.count) + let connectResult = withUnsafePointer(to: &address) { + $0.withMemoryRebound(to: sockaddr.self, capacity: 1) { + Darwin.connect(fd, $0, addressLength) + } + } + guard connectResult == 0 else { + throw LocalTerminalDaemonRPCError.invalidResponse("failed to connect to local daemon socket") + } + + try request.withUnsafeBytes { rawBuffer in + guard let baseAddress = rawBuffer.bindMemory(to: UInt8.self).baseAddress else { return } + var bytesRemaining = rawBuffer.count + var pointer = baseAddress + while bytesRemaining > 0 { + let written = Darwin.write(fd, pointer, bytesRemaining) + if written <= 0 { + throw LocalTerminalDaemonRPCError.invalidResponse("failed to write daemon request") + } + bytesRemaining -= written + pointer = pointer.advanced(by: written) + } + } + _ = shutdown(fd, SHUT_WR) + + var response = Data() + var scratch = [UInt8](repeating: 0, count: 4096) + while true { + let count = Darwin.read(fd, &scratch, scratch.count) + if count > 0 { + response.append(scratch, count: count) + continue + } + if count == 0 { + break + } + if errno == EAGAIN || errno == EWOULDBLOCK { + throw LocalTerminalDaemonRPCError.invalidResponse("timed out waiting for daemon response") + } + throw LocalTerminalDaemonRPCError.invalidResponse("failed to read daemon response") + } + return response + } + + private func uint64FromAny(_ value: Any?) -> UInt64? { + Self.uint64FromAny(value) + } + + private static func uint64FromAny(_ value: Any?) -> UInt64? 
{ + switch value { + case let number as NSNumber: + return number.uint64Value + case let string as String: + return UInt64(string) + default: + return nil + } + } +} + +final class LocalTerminalDaemonSessionController { + private let bootstrap: LocalTerminalDaemonSurfaceBootstrap + private let rpcClient: LocalTerminalDaemonRPCClient + private let eventHandler: @Sendable (LocalTerminalDaemonControllerEvent) -> Void + private let stateLock = NSLock() + private let readQueue = DispatchQueue(label: "cmux.local-daemon.read", qos: .userInitiated) + private let writeQueue = DispatchQueue(label: "cmux.local-daemon.write", qos: .userInitiated) + private let readTimeoutMilliseconds = 250 + private let maxReadBytes = 64 * 1024 + + private var latestSize = LocalTerminalDaemonGridSize(columns: 80, rows: 24) + private var attachmentID: String? + private var nextOffset: UInt64 = 0 + private var pendingWrites: [Data] = [] + private var started = false + private var stopped = false + private var finished = false + + init( + bootstrap: LocalTerminalDaemonSurfaceBootstrap, + eventHandler: @escaping @Sendable (LocalTerminalDaemonControllerEvent) -> Void + ) { + self.bootstrap = bootstrap + self.rpcClient = LocalTerminalDaemonRPCClient(socketPath: bootstrap.configuration.socketPath) + self.eventHandler = eventHandler + } + + func start(initialSize: LocalTerminalDaemonGridSize) { + let shouldStart = withStateLock { () -> Bool in + latestSize = initialSize + guard !started, !stopped else { return false } + started = true + return true + } + guard shouldStart else { return } + readQueue.async { [weak self] in + self?.runReadLoop(initialSize: initialSize) + } + } + + func send(_ data: Data) { + guard !data.isEmpty else { return } + writeQueue.async { [weak self] in + self?.write(data) + } + } + + func resize(_ size: LocalTerminalDaemonGridSize) { + let activeAttachmentID = withStateLock { () -> String? 
in + latestSize = size + guard !stopped else { return nil } + return self.attachmentID + } + guard let activeAttachmentID else { return } + writeQueue.async { [weak self] in + guard let self else { return } + do { + try self.rpcClient.sessionResize( + sessionID: self.bootstrap.sessionID, + attachmentID: activeAttachmentID, + size: size + ) + } catch { + self.finishFailureIfNeeded(message: error.localizedDescription) + } + } + } + + func stop(closeSession: Bool) { + let attachmentSnapshot = withStateLock { () -> String? in + guard !stopped else { return nil } + stopped = true + return attachmentID + } + writeQueue.async { [weak self] in + guard let self else { return } + guard self.withStateLock({ !self.finished }) else { return } + if closeSession { + try? self.rpcClient.sessionClose(sessionID: self.bootstrap.sessionID) + } else if let attachmentSnapshot { + try? self.rpcClient.sessionDetach( + sessionID: self.bootstrap.sessionID, + attachmentID: attachmentSnapshot + ) + } + } + } + + private func write(_ data: Data) { + let activeAttachmentID = withStateLock { () -> String? 
in + guard !stopped else { return nil } + guard let currentAttachmentID = self.attachmentID else { + pendingWrites.append(data) + return nil + } + return currentAttachmentID + } + guard activeAttachmentID != nil else { return } + do { + try rpcClient.terminalWrite(sessionID: bootstrap.sessionID, data: data) + } catch { + finishFailureIfNeeded(message: error.localizedDescription) + } + } + + private func runReadLoop(initialSize: LocalTerminalDaemonGridSize) { + do { + let openResult = try rpcClient.terminalOpen( + sessionID: bootstrap.sessionID, + command: bootstrap.command, + cols: max(1, initialSize.columns), + rows: max(1, initialSize.rows) + ) + + let (queuedWrites, resizedSize, shouldAbort) = withStateLock { () -> ([Data], LocalTerminalDaemonGridSize?, Bool) in + if stopped { + finished = true + return ([], nil, true) + } + attachmentID = openResult.attachmentID + nextOffset = openResult.offset + let writes = self.pendingWrites + self.pendingWrites.removeAll(keepingCapacity: false) + let resizedSize = latestSize == initialSize ? nil : latestSize + return (writes, resizedSize, false) + } + + if shouldAbort { + try? rpcClient.sessionClose(sessionID: bootstrap.sessionID) + return + } + + if let resizedSize { + try? 
rpcClient.sessionResize( + sessionID: bootstrap.sessionID, + attachmentID: openResult.attachmentID, + size: resizedSize + ) + } + + for chunk in queuedWrites { + try rpcClient.terminalWrite(sessionID: bootstrap.sessionID, data: chunk) + } + + while !isStopped() { + let offset = withStateLock { nextOffset } + do { + let readResult = try rpcClient.terminalRead( + sessionID: bootstrap.sessionID, + offset: offset, + maxBytes: maxReadBytes, + timeoutMilliseconds: readTimeoutMilliseconds + ) + + withStateLock { + nextOffset = readResult.offset + } + + if !readResult.data.isEmpty { + eventHandler(.output(readResult.data)) + } + + if readResult.eof { + let shouldEmit = withStateLock { () -> Bool in + guard !finished else { return false } + finished = true + stopped = true + attachmentID = nil + return true + } + if shouldEmit { + eventHandler(.exited) + } + return + } + } catch let error as LocalTerminalDaemonRPCError { + if case .rpc(let code, _) = error, code == "deadline_exceeded" { + continue + } + if isStopped() { + return + } + finishFailureIfNeeded(message: error.localizedDescription) + return + } catch { + if isStopped() { + return + } + finishFailureIfNeeded(message: error.localizedDescription) + return + } + } + } catch { + if isStopped() { + return + } + finishFailureIfNeeded(message: error.localizedDescription) + } + } + + private func finishFailureIfNeeded(message: String) { + let shouldEmit = withStateLock { () -> Bool in + guard !finished else { return false } + finished = true + stopped = true + attachmentID = nil + return true + } + if shouldEmit { + eventHandler(.failed(message)) + } + } + + private func isStopped() -> Bool { + withStateLock { stopped } + } + + private func withStateLock<T>(_ body: () -> T) -> T { + stateLock.lock() + defer { stateLock.unlock() } + return body() + } +} + /// Workspace represents a sidebar tab. /// Each workspace contains one BonsplitController that manages split panes and nested surfaces. 
@MainActor diff --git a/cmuxTests/GhosttyConfigTests.swift b/cmuxTests/GhosttyConfigTests.swift index a25d4100f..16b27efdd 100644 --- a/cmuxTests/GhosttyConfigTests.swift +++ b/cmuxTests/GhosttyConfigTests.swift @@ -1486,52 +1486,43 @@ final class LocalTerminalDaemonBridgeTests: XCTestCase { XCTAssertEqual(configuration.daemonBinaryPath, fakeDaemonBinary.path) } - func testWorkspaceInitialTerminalUsesLocalDaemonStartupCommandWhenConfigured() throws { + func testBootstrapSessionBuildsDirectRustSessionCommandWhenConfigured() throws { LocalTerminalDaemonBridge.testingConfiguration = LocalTerminalDaemonConfiguration( socketPath: "/tmp/cmuxd-test.sock", daemonBinaryPath: "/tmp/cmuxd-remote-test" ) + let sessionID = UUID() + let workspaceID = UUID() let workingDirectory = FileManager.default.temporaryDirectory .appendingPathComponent("cmux-local-daemon-workspace") .path - let workspace = Workspace( - workingDirectory: workingDirectory, - initialTerminalEnvironment: ["EXPLICIT_ENV": "present"] + let bootstrap = try XCTUnwrap( + LocalTerminalDaemonBridge.bootstrapSession( + sessionID: sessionID, + workspaceID: workspaceID, + portOrdinal: 0, + workingDirectory: workingDirectory, + intendedCommand: nil, + initialEnvironmentOverrides: ["EXPLICIT_ENV": "present"], + additionalEnvironment: [:] + ) ) - let panelId = try XCTUnwrap(workspace.focusedPanelId) - let terminalPanel = try XCTUnwrap(workspace.panels[panelId] as? 
TerminalPanel) - let command = try XCTUnwrap(terminalPanel.surface.debugInitialCommand()) + XCTAssertEqual(bootstrap.configuration.socketPath, "/tmp/cmuxd-test.sock") + XCTAssertEqual(bootstrap.configuration.daemonBinaryPath, "/tmp/cmuxd-remote-test") + XCTAssertEqual(bootstrap.sessionID, sessionID.uuidString) + let command = bootstrap.command XCTAssertTrue( - command.hasPrefix("sh -c '"), - "Expected local daemon startup command to be shell wrapped, got: \(command)" - ) - XCTAssertTrue( - command.contains("/tmp/cmuxd-remote-test"), - "Expected local daemon binary path in startup command, got: \(command)" - ) - XCTAssertTrue( - command.contains("session new"), - "Expected daemon session creation in startup command, got: \(command)" - ) - XCTAssertTrue( - command.contains("--quiet"), - "Expected app daemon startup command to suppress session UUID output, got: \(command)" - ) - XCTAssertTrue( - command.contains(terminalPanel.id.uuidString), - "Expected panel UUID in startup command, got: \(command)" - ) - XCTAssertTrue( - command.contains("/tmp/cmuxd-test.sock"), - "Expected local daemon startup command, got: \(command)" + command.contains("exec "), + "Expected shell exec in daemon command, got: \(command)" ) + XCTAssertFalse(command.contains("amux new"), "Local bootstrap should not shell out to amux new, got: \(command)") XCTAssertTrue(command.contains("CMUX_SURFACE_ID="), "Expected surface export in daemon command, got: \(command)") - XCTAssertTrue(command.contains(terminalPanel.id.uuidString), "Expected surface UUID in daemon command, got: \(command)") + XCTAssertTrue(command.contains(sessionID.uuidString), "Expected surface UUID in daemon command, got: \(command)") XCTAssertTrue(command.contains("CMUX_WORKSPACE_ID="), "Expected workspace export in daemon command, got: \(command)") - XCTAssertTrue(command.contains(workspace.id.uuidString), "Expected workspace UUID in daemon command, got: \(command)") + XCTAssertTrue(command.contains(workspaceID.uuidString), 
"Expected workspace UUID in daemon command, got: \(command)") XCTAssertTrue(command.contains("EXPLICIT_ENV="), "Expected initial environment export in daemon command, got: \(command)") XCTAssertTrue(command.contains("present"), "Expected initial environment value in daemon command, got: \(command)") XCTAssertTrue(command.contains("cd "), "Expected working-directory hop in daemon command, got: \(command)") @@ -1545,7 +1536,7 @@ final class LocalTerminalDaemonBridgeTests: XCTestCase { XCTAssertTrue(command.contains("TERM_PROGRAM="), "Expected daemon command to export TERM_PROGRAM, got: \(command)") } - func testWorkspaceInitialTerminalPreservesRequestedInitialCommandInsideLocalDaemonSession() throws { + func testWorkspaceInitialTerminalUsesDirectLocalDaemonBootstrapWhenConfigured() throws { LocalTerminalDaemonBridge.testingConfiguration = LocalTerminalDaemonConfiguration( socketPath: "/tmp/cmuxd-test.sock", daemonBinaryPath: "/tmp/cmuxd-remote-test" @@ -1554,12 +1545,36 @@ final class LocalTerminalDaemonBridgeTests: XCTestCase { let workspace = Workspace(initialTerminalCommand: "printf READY") let panelId = try XCTUnwrap(workspace.focusedPanelId) let terminalPanel = try XCTUnwrap(workspace.panels[panelId] as? TerminalPanel) - let command = try XCTUnwrap(terminalPanel.surface.debugInitialCommand()) + XCTAssertNil(terminalPanel.surface.debugInitialCommand()) + + let daemonInfo = try XCTUnwrap(terminalPanel.surface.localDaemonInfoPayload()) + XCTAssertEqual(daemonInfo["socket_path"] as? String, "/tmp/cmuxd-test.sock") + XCTAssertEqual(daemonInfo["daemon_binary_path"] as? String, "/tmp/cmuxd-remote-test") + XCTAssertEqual(daemonInfo["session_id"] as? 
String, terminalPanel.id.uuidString) + } + + func testBootstrapSessionPreservesRequestedInitialCommandInsideLocalDaemonSession() throws { + LocalTerminalDaemonBridge.testingConfiguration = LocalTerminalDaemonConfiguration( + socketPath: "/tmp/cmuxd-test.sock", + daemonBinaryPath: "/tmp/cmuxd-remote-test" + ) + + let bootstrap = try XCTUnwrap( + LocalTerminalDaemonBridge.bootstrapSession( + sessionID: UUID(), + workspaceID: UUID(), + portOrdinal: 0, + workingDirectory: nil, + intendedCommand: "printf READY", + initialEnvironmentOverrides: [:], + additionalEnvironment: [:] + ) + ) - XCTAssertTrue(command.contains("printf READY"), "Expected initial command inside daemon launch, got: \(command)") + XCTAssertTrue(bootstrap.command.contains("printf READY"), "Expected initial command inside daemon session command, got: \(bootstrap.command)") } - func testStartupCommandStartsManagedDaemonWhenSocketIsConfiguredButOffline() throws { + func testBootstrapSessionStartsManagedDaemonWhenSocketIsConfiguredButOffline() throws { let temporaryDirectory = FileManager.default.temporaryDirectory .appendingPathComponent(UUID().uuidString, isDirectory: true) try FileManager.default.createDirectory(at: temporaryDirectory, withIntermediateDirectories: true) @@ -1581,8 +1596,8 @@ final class LocalTerminalDaemonBridgeTests: XCTestCase { let sessionID = UUID() let workspaceID = UUID() - let command = try XCTUnwrap( - LocalTerminalDaemonBridge.startupCommand( + let bootstrap = try XCTUnwrap( + LocalTerminalDaemonBridge.bootstrapSession( sessionID: sessionID, workspaceID: workspaceID, portOrdinal: 0, @@ -1606,10 +1621,10 @@ final class LocalTerminalDaemonBridgeTests: XCTestCase { daemonBinaryPath: fakeDaemonBinary.path ) ) - XCTAssertTrue(command.contains(fakeDaemonBinary.path), "Expected daemon binary path in startup command, got: \(command)") - XCTAssertTrue(command.contains(socketPath), "Expected daemon socket path in startup command, got: \(command)") - 
XCTAssertTrue(command.contains(sessionID.uuidString), "Expected session ID in startup command, got: \(command)") - XCTAssertTrue(command.contains(workspaceID.uuidString), "Expected workspace ID in startup command, got: \(command)") + XCTAssertEqual(bootstrap.configuration.daemonBinaryPath, fakeDaemonBinary.path) + XCTAssertEqual(bootstrap.configuration.socketPath, socketPath) + XCTAssertEqual(bootstrap.sessionID, sessionID.uuidString) + XCTAssertTrue(bootstrap.command.contains(workspaceID.uuidString), "Expected workspace ID in daemon command, got: \(bootstrap.command)") } private func makeListeningUnixSocket(at path: String) throws -> Int32 { diff --git a/cmuxTests/TabManagerUnitTests.swift b/cmuxTests/TabManagerUnitTests.swift index 7866a6cc2..b34507618 100644 --- a/cmuxTests/TabManagerUnitTests.swift +++ b/cmuxTests/TabManagerUnitTests.swift @@ -989,7 +989,10 @@ final class BonsplitZoomCompatibilityTests: XCTestCase { receivedPaneId = incomingPaneId } - let paneId = controller.allPaneIds.first + guard let paneId = controller.allPaneIds.first else { + XCTFail("Expected a default Bonsplit pane") + return + } controller.onTabCloseRequest?(tabId, paneId) XCTAssertEqual(receivedTabId?.uuid, tabId.uuid) diff --git a/daemon/remote/cmd/cmuxd-remote/ioctl_unix_darwin.go b/daemon/remote/cmd/cmuxd-remote/ioctl_unix_darwin.go new file mode 100644 index 000000000..2890b47b2 --- /dev/null +++ b/daemon/remote/cmd/cmuxd-remote/ioctl_unix_darwin.go @@ -0,0 +1,13 @@ +//go:build darwin + +package main + +import "syscall" + +func ioctlReadTermiosRequest() uintptr { + return syscall.TIOCGETA +} + +func ioctlWriteTermiosRequest() uintptr { + return syscall.TIOCSETA +} diff --git a/daemon/remote/cmd/cmuxd-remote/ioctl_unix_linux.go b/daemon/remote/cmd/cmuxd-remote/ioctl_unix_linux.go new file mode 100644 index 000000000..17d39c01e --- /dev/null +++ b/daemon/remote/cmd/cmuxd-remote/ioctl_unix_linux.go @@ -0,0 +1,13 @@ +//go:build linux + +package main + +import "syscall" + +func 
ioctlReadTermiosRequest() uintptr { + return syscall.TCGETS +} + +func ioctlWriteTermiosRequest() uintptr { + return syscall.TCSETS +} diff --git a/daemon/remote/cmd/cmuxd-remote/main.go b/daemon/remote/cmd/cmuxd-remote/main.go index 25da88aab..4f2ec89cd 100644 --- a/daemon/remote/cmd/cmuxd-remote/main.go +++ b/daemon/remote/cmd/cmuxd-remote/main.go @@ -54,6 +54,8 @@ func run(args []string, stdin io.Reader, stdout, stderr io.Writer) int { fs := flag.NewFlagSet("serve", flag.ContinueOnError) fs.SetOutput(stderr) stdio := fs.Bool("stdio", false, "serve over stdin/stdout") + unixMode := fs.Bool("unix", false, "serve over a Unix socket") + socketPath := fs.String("socket", "", "Unix socket path") tlsMode := fs.Bool("tls", false, "serve over TLS") listenAddr := fs.String("listen", "", "TLS listen address") serverID := fs.String("server-id", "", "server identifier for ticket verification") @@ -63,8 +65,18 @@ func run(args []string, stdin io.Reader, stdout, stderr io.Writer) int { if err := fs.Parse(args[1:]); err != nil { return 2 } - if *stdio == *tlsMode { - _, _ = fmt.Fprintln(stderr, "serve requires exactly one of --stdio or --tls") + modeCount := 0 + if *stdio { + modeCount++ + } + if *unixMode { + modeCount++ + } + if *tlsMode { + modeCount++ + } + if modeCount != 1 { + _, _ = fmt.Fprintln(stderr, "serve requires exactly one of --stdio, --unix, or --tls") return 2 } if *stdio { @@ -74,6 +86,13 @@ func run(args []string, stdin io.Reader, stdout, stderr io.Writer) int { } return 0 } + if *unixMode { + if err := runUnixServer(*socketPath); err != nil { + _, _ = fmt.Fprintf(stderr, "serve failed: %v\n", err) + return 1 + } + return 0 + } if err := runTLSServer(direct.Config{ ServerID: *serverID, TicketSecret: []byte(*ticketSecret), @@ -85,8 +104,12 @@ func run(args []string, stdin io.Reader, stdout, stderr io.Writer) int { return 1 } return 0 + case "session": + return runSessionCLI(args[1:]) case "cli": return runCLI(args[1:]) + case "list", "ls", "attach", "status", 
"history", "kill", "new": + return runSessionCLI(args) default: usage(stderr) return 2 @@ -97,7 +120,9 @@ func usage(w io.Writer) { _, _ = fmt.Fprintln(w, "Usage:") _, _ = fmt.Fprintln(w, " cmuxd-remote version") _, _ = fmt.Fprintln(w, " cmuxd-remote serve --stdio") + _, _ = fmt.Fprintln(w, " cmuxd-remote serve --unix --socket <path>") _, _ = fmt.Fprintln(w, " cmuxd-remote serve --tls --listen <addr> --server-id <id> --ticket-secret <secret> --cert-file <path> --key-file <path>") + _, _ = fmt.Fprintln(w, " cmuxd-remote session <command> [args...]") _, _ = fmt.Fprintln(w, " cmuxd-remote cli <command> [args...]") } @@ -107,6 +132,42 @@ func runStdioServer(stdin io.Reader, stdout io.Writer) error { return rpc.NewServer(server.handleRequest).Serve(stdin, stdout) } +func runUnixServer(socketPath string) error { + if socketPath == "" { + return errors.New("unix server requires --socket") + } + if err := os.MkdirAll(filepath.Dir(socketPath), 0o755); err != nil { + return err + } + _ = os.Remove(socketPath) + + listener, err := net.Listen("unix", socketPath) + if err != nil { + return err + } + defer func() { + _ = listener.Close() + _ = os.Remove(socketPath) + }() + + server := newDaemonServer() + defer server.closeAll() + + for { + conn, err := listener.Accept() + if err != nil { + if errors.Is(err, net.ErrClosed) { + return nil + } + return err + } + go func(conn net.Conn) { + defer conn.Close() + _ = rpc.NewServer(server.handleRequest).Serve(conn, conn) + }(conn) + } +} + func runTLSServer(cfg direct.Config) error { server := newDaemonServer() defer server.closeAll() @@ -181,6 +242,10 @@ func (s *daemonServer) handleRequest(req rpc.Request) rpc.Response { return s.handleSessionDetach(req) case "session.status": return s.handleSessionStatus(req) + case "session.list": + return s.handleSessionList(req) + case "session.history": + return s.handleSessionHistory(req) case "terminal.open": return s.handleTerminalOpen(req) case "terminal.read": @@ -699,6 +764,52 @@ func (s 
*daemonServer) handleSessionStatus(req rpc.Request) rpc.Response { } } +func (s *daemonServer) handleSessionList(req rpc.Request) rpc.Response { + sessions := s.sessions.List() + result := make([]map[string]any, 0, len(sessions)) + for _, status := range sessions { + result = append(result, map[string]any{ + "session_id": status.SessionID, + "attachment_count": len(status.Attachments), + "effective_cols": status.EffectiveCols, + "effective_rows": status.EffectiveRows, + }) + } + return rpc.Response{ + ID: req.ID, + OK: true, + Result: map[string]any{"sessions": result}, + } +} + +func (s *daemonServer) handleSessionHistory(req rpc.Request) rpc.Response { + sessionID, ok := getStringParam(req.Params, "session_id") + if !ok || sessionID == "" { + return rpc.Response{ + ID: req.ID, + OK: false, + Error: &rpc.Error{ + Code: "invalid_params", + Message: "session.history requires session_id", + }, + } + } + + history, err := s.terminals.History(sessionID) + if err != nil { + return rpc.Response{ + ID: req.ID, + OK: false, + Error: terminalError(err), + } + } + return rpc.Response{ + ID: req.ID, + OK: true, + Result: map[string]any{"session_id": sessionID, "history": string(history)}, + } +} + func (s *daemonServer) handleTerminalOpen(req rpc.Request) rpc.Response { command, ok := getStringParam(req.Params, "command") if !ok || command == "" { @@ -735,7 +846,15 @@ func (s *daemonServer) handleTerminalOpen(req rpc.Request) rpc.Response { } } - sessionID, attachmentID := s.sessions.Open(cols, rows) + requestedSessionID, _ := getStringParam(req.Params, "session_id") + sessionID, attachmentID, err := s.sessions.Open(requestedSessionID, cols, rows) + if err != nil { + return rpc.Response{ + ID: req.ID, + OK: false, + Error: sessionError(err), + } + } status, err := s.sessions.Status(sessionID) if err != nil { return rpc.Response{ID: req.ID, OK: false, Error: sessionError(err)} @@ -937,6 +1056,8 @@ func sessionError(err error) *rpc.Error { return nil case 
session.ErrSessionNotFound: return &rpc.Error{Code: "not_found", Message: "session not found"} + case session.ErrSessionExists: + return &rpc.Error{Code: "already_exists", Message: err.Error()} case session.ErrAttachmentNotFound: return &rpc.Error{Code: "not_found", Message: "attachment not found"} case session.ErrInvalidSize: diff --git a/daemon/remote/cmd/cmuxd-remote/main_test.go b/daemon/remote/cmd/cmuxd-remote/main_test.go index 85a08bf91..529512b6d 100644 --- a/daemon/remote/cmd/cmuxd-remote/main_test.go +++ b/daemon/remote/cmd/cmuxd-remote/main_test.go @@ -1,12 +1,19 @@ package main import ( - "encoding/base64" "bufio" + "encoding/base64" "encoding/json" + "errors" "fmt" "io" + "net" + "os" + "path/filepath" + "strings" "testing" + + "github.com/manaflow-ai/cmux/daemon/remote/internal/rpc" ) func TestServeStdioSupportsHelloAndSessionLifecycle(t *testing.T) { @@ -131,8 +138,8 @@ func TestServeStdioSupportsTerminalOpenReadAndWrite(t *testing.T) { t.Fatalf("terminal.read echo result missing: %+v", readEcho) } echoChunk := decodeBase64Field(t, echoResult, "data") - if string(echoChunk) != "hello\r\n" { - t.Fatalf("echo chunk = %q, want %q", string(echoChunk), "hello\r\n") + if string(echoChunk) != "hello\n" { + t.Fatalf("echo chunk = %q, want %q", string(echoChunk), "hello\n") } _ = stdinW.Close() @@ -141,6 +148,110 @@ func TestServeStdioSupportsTerminalOpenReadAndWrite(t *testing.T) { } } +func TestServeStdioRejectsDuplicateTerminalOpenWithoutCorruptingExistingSession(t *testing.T) { + t.Parallel() + + stdinR, stdinW := io.Pipe() + stdoutR, stdoutW := io.Pipe() + + done := make(chan int, 1) + go func() { + done <- run([]string{"serve", "--stdio"}, stdinR, stdoutW, io.Discard) + }() + + reader := bufio.NewReader(stdoutR) + send := func(line string) map[string]any { + t.Helper() + + if _, err := io.WriteString(stdinW, line+"\n"); err != nil { + t.Fatalf("write request: %v", err) + } + + respLine, err := reader.ReadString('\n') + if err != nil { + t.Fatalf("read 
response: %v", err) + } + + var payload map[string]any + if err := json.Unmarshal([]byte(respLine), &payload); err != nil { + t.Fatalf("decode response: %v", err) + } + return payload + } + + firstOpen := send(`{"id":1,"method":"terminal.open","params":{"session_id":"dup-demo","command":"printf READY; stty raw -echo -onlcr; exec cat","cols":120,"rows":40}}`) + if ok, _ := firstOpen["ok"].(bool); !ok { + t.Fatalf("first terminal.open should succeed: %+v", firstOpen) + } + + secondOpen := send(`{"id":2,"method":"terminal.open","params":{"session_id":"dup-demo","command":"printf BAD; exec cat","cols":80,"rows":24}}`) + if ok, _ := secondOpen["ok"].(bool); ok { + t.Fatalf("second terminal.open should fail: %+v", secondOpen) + } + if got := nestedString(secondOpen, "error", "code"); got != "already_exists" { + t.Fatalf("second terminal.open error code = %q, want %q", got, "already_exists") + } + + read := send(`{"id":3,"method":"terminal.read","params":{"session_id":"dup-demo","offset":0,"max_bytes":1024,"timeout_ms":1000}}`) + if ok, _ := read["ok"].(bool); !ok { + t.Fatalf("terminal.read should still succeed for original session: %+v", read) + } + readResult, ok := read["result"].(map[string]any) + if !ok { + t.Fatalf("terminal.read result missing: %+v", read) + } + if string(decodeBase64Field(t, readResult, "data")) != "READY" { + t.Fatalf("terminal.read data = %q, want %q", string(decodeBase64Field(t, readResult, "data")), "READY") + } + + _ = stdinW.Close() + if code := <-done; code != 0 { + t.Fatalf("serve exit code = %d, want 0", code) + } +} + +func TestSessionAttachDetachesIfRawModeSetupFails(t *testing.T) { + t.Parallel() + + socketPath := startTestUnixDaemon(t) + open := callUnixRPC(t, socketPath, map[string]any{ + "id": 1, + "method": "terminal.open", + "params": map[string]any{ + "session_id": "attach-cleanup", + "command": "cat", + "cols": 80, + "rows": 24, + }, + }) + if ok, _ := open["ok"].(bool); !ok { + t.Fatalf("terminal.open should succeed: %+v", 
open) + } + + if code := sessionAttach(socketPath, "attach-cleanup"); code != 1 { + t.Fatalf("sessionAttach exit code = %d, want 1 when raw mode setup fails", code) + } + + status := callUnixRPC(t, socketPath, map[string]any{ + "id": 2, + "method": "session.status", + "params": map[string]any{ + "session_id": "attach-cleanup", + }, + }) + if ok, _ := status["ok"].(bool); !ok { + t.Fatalf("session.status should succeed: %+v", status) + } + attachments := status["result"].(map[string]any)["attachments"].([]any) + if len(attachments) != 1 { + t.Fatalf("expected only the bootstrap attachment after failed attach, got %+v", attachments) + } + attachmentID := attachments[0].(map[string]any)["attachment_id"].(string) + if strings.HasPrefix(attachmentID, "cli-") { + t.Fatalf("failed attach left a cli attachment behind: %+v", attachments) + } +} + func decodeBase64Field(t *testing.T, payload map[string]any, key string) []byte { t.Helper() @@ -158,3 +269,101 @@ func decodeBase64Field(t *testing.T, payload map[string]any, key string) []byte func jsonNumber(value float64) string { return fmt.Sprintf("%.0f", value) } + +func nestedString(payload map[string]any, keys ...string) string { + current := payload + for index, key := range keys { + value, ok := current[key] + if !ok { + return "" + } + if index == len(keys)-1 { + text, _ := value.(string) + return text + } + next, _ := value.(map[string]any) + if next == nil { + return "" + } + current = next + } + return "" +} + +func startTestUnixDaemon(t *testing.T) string { + t.Helper() + + socketDir, err := os.MkdirTemp("", "cmuxd-test-") + if err != nil { + t.Fatalf("mkdir temp socket dir: %v", err) + } + shortDir := filepath.Join(os.TempDir(), filepath.Base(socketDir)) + if renameErr := os.Rename(socketDir, shortDir); renameErr == nil { + socketDir = shortDir + } + t.Cleanup(func() { + _ = os.RemoveAll(socketDir) + }) + + socketPath := filepath.Join(socketDir, "daemon.sock") + listener, err := net.Listen("unix", socketPath) + if 
err != nil { + t.Fatalf("listen on unix socket: %v", err) + } + + server := newDaemonServer() + done := make(chan struct{}) + go func() { + defer close(done) + for { + conn, err := listener.Accept() + if err != nil { + if errors.Is(err, net.ErrClosed) { + return + } + return + } + go func(conn net.Conn) { + defer conn.Close() + _ = rpc.NewServer(server.handleRequest).Serve(conn, conn) + }(conn) + } + }() + + t.Cleanup(func() { + _ = listener.Close() + server.closeAll() + <-done + }) + return socketPath +} + +func callUnixRPC(t *testing.T, socketPath string, payload map[string]any) map[string]any { + t.Helper() + + conn, err := net.Dial("unix", socketPath) + if err != nil { + t.Fatalf("dial unix socket %s: %v", socketPath, err) + } + defer conn.Close() + + reader := bufio.NewReader(conn) + encoded, err := json.Marshal(payload) + if err != nil { + t.Fatalf("marshal payload: %v", err) + } + if _, err := conn.Write(append(encoded, '\n')); err != nil { + t.Fatalf("write payload: %v", err) + } + + line, err := reader.ReadString('\n') + if err != nil { + t.Fatalf("read response: %v", err) + } + + var response map[string]any + if err := json.Unmarshal([]byte(line), &response); err != nil { + t.Fatalf("decode response %q: %v", line, err) + } + return response +} diff --git a/daemon/remote/cmd/cmuxd-remote/session_cli.go b/daemon/remote/cmd/cmuxd-remote/session_cli.go new file mode 100644 index 000000000..ec345552d --- /dev/null +++ b/daemon/remote/cmd/cmuxd-remote/session_cli.go @@ -0,0 +1,512 @@ +package main + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "io" + "os" + "os/signal" + "strings" + "sync/atomic" + "syscall" + "time" + "unsafe" +) + +func runSessionCLI(args []string) int { + socketPath, filtered, err := resolveSessionSocket(args) + if err != nil { + fmt.Fprintln(os.Stderr, err) + return 1 + } + if len(filtered) == 0 { + sessionUsage() + return 2 + } + + switch filtered[0] { + case "ls", "list": + return sessionList(socketPath) 
+ case "status": + if len(filtered) < 2 { + fmt.Fprintln(os.Stderr, "status requires a session id") + return 2 + } + return sessionStatus(socketPath, filtered[1]) + case "history": + if len(filtered) < 2 { + fmt.Fprintln(os.Stderr, "history requires a session id") + return 2 + } + return sessionHistory(socketPath, filtered[1]) + case "kill": + if len(filtered) < 2 { + fmt.Fprintln(os.Stderr, "kill requires a session id") + return 2 + } + return sessionKill(socketPath, filtered[1]) + case "new": + return sessionNew(socketPath, filtered[1:]) + case "attach": + if len(filtered) < 2 { + fmt.Fprintln(os.Stderr, "attach requires a session id") + return 2 + } + return sessionAttach(socketPath, filtered[1]) + default: + sessionUsage() + return 2 + } +} + +func resolveSessionSocket(args []string) (string, []string, error) { + socketPath := findSocketArg(args) + filtered := stripSocketArg(args) + if socketPath == "" { + socketPath = strings.TrimSpace(os.Getenv("CMUXD_UNIX_PATH")) + } + if socketPath == "" { + socketPath = strings.TrimSpace(os.Getenv("CMUX_SOCKET_PATH")) + } + if socketPath == "" { + return "", nil, errors.New("missing --socket and CMUXD_UNIX_PATH") + } + return socketPath, filtered, nil +} + +func findSocketArg(args []string) string { + for i := 0; i < len(args); i++ { + if args[i] == "--socket" && i+1 < len(args) { + return args[i+1] + } + } + return "" +} + +func stripSocketArg(args []string) []string { + out := make([]string, 0, len(args)) + for i := 0; i < len(args); i++ { + if args[i] == "--socket" && i+1 < len(args) { + i++ + continue + } + out = append(out, args[i]) + } + return out +} + +func sessionList(socketPath string) int { + result, err := callJSONRPCValue(socketPath, "session.list", map[string]any{}) + if err != nil { + fmt.Fprintln(os.Stderr, err) + return 1 + } + value, ok := result.(map[string]any) + if !ok { + fmt.Fprintln(os.Stderr, "session.list returned an invalid response") + return 1 + } + sessions, _ := value["sessions"].([]any) + if 
len(sessions) == 0 { + fmt.Println("No sessions") + return 0 + } + + for _, item := range sessions { + session, _ := item.(map[string]any) + sessionID := stringField(session["session_id"]) + statusResult, err := callJSONRPCValue(socketPath, "session.status", map[string]any{ + "session_id": sessionID, + }) + if err != nil { + fmt.Fprintln(os.Stderr, err) + return 1 + } + status, ok := statusResult.(map[string]any) + if !ok { + fmt.Fprintln(os.Stderr, "session.status returned an invalid response") + return 1 + } + + effectiveCols := intField(status["effective_cols"]) + effectiveRows := intField(status["effective_rows"]) + attachments, _ := status["attachments"].([]any) + if len(attachments) == 0 { + fmt.Printf("session %s %dx%d [detached]\n", sessionID, effectiveCols, effectiveRows) + continue + } + + fmt.Printf( + "session %s %dx%d attachments=%d\n", + sessionID, + effectiveCols, + effectiveRows, + len(attachments), + ) + for i, rawAttachment := range attachments { + attachment, _ := rawAttachment.(map[string]any) + branch := "├──" + if i+1 == len(attachments) { + branch = "└──" + } + fmt.Printf( + "%s %s %dx%d\n", + branch, + stringField(attachment["attachment_id"]), + intField(attachment["cols"]), + intField(attachment["rows"]), + ) + } + } + return 0 +} + +func sessionStatus(socketPath, sessionID string) int { + result, err := callJSONRPCValue(socketPath, "session.status", map[string]any{ + "session_id": sessionID, + }) + if err != nil { + fmt.Fprintln(os.Stderr, err) + return 1 + } + status, ok := result.(map[string]any) + if !ok { + fmt.Fprintln(os.Stderr, "session.status returned an invalid response") + return 1 + } + fmt.Printf("%s %dx%d\n", sessionID, intField(status["effective_cols"]), intField(status["effective_rows"])) + return 0 +} + +func sessionHistory(socketPath, sessionID string) int { + result, err := callJSONRPCValue(socketPath, "session.history", map[string]any{ + "session_id": sessionID, + }) + if err != nil { + fmt.Fprintln(os.Stderr, err) + 
return 1 + } + value, ok := result.(map[string]any) + if !ok { + fmt.Fprintln(os.Stderr, "session.history returned an invalid response") + return 1 + } + fmt.Print(stringField(value["history"])) + return 0 +} + +func sessionKill(socketPath, sessionID string) int { + if _, err := callJSONRPCValue(socketPath, "session.close", map[string]any{ + "session_id": sessionID, + }); err != nil { + fmt.Fprintln(os.Stderr, err) + return 1 + } + fmt.Println(sessionID) + return 0 +} + +func sessionNew(socketPath string, args []string) int { + if len(args) == 0 { + fmt.Fprintln(os.Stderr, "new requires a session id") + return 2 + } + sessionID := args[0] + var detached bool + var quiet bool + command := "exec ${SHELL:-/bin/sh} -l" + for i := 1; i < len(args); i++ { + switch args[i] { + case "--detached": + detached = true + case "--quiet": + quiet = true + case "--": + if i+1 < len(args) { + command = strings.Join(args[i+1:], " ") + } + i = len(args) + default: + fmt.Fprintf(os.Stderr, "unknown flag %s\n", args[i]) + return 2 + } + } + + cols, rows := currentTerminalSize() + result, err := callJSONRPCValue(socketPath, "terminal.open", map[string]any{ + "session_id": sessionID, + "command": command, + "cols": cols, + "rows": rows, + }) + if err != nil { + fmt.Fprintln(os.Stderr, err) + return 1 + } + value, ok := result.(map[string]any) + if !ok { + fmt.Fprintln(os.Stderr, "terminal.open returned an invalid response") + return 1 + } + attachmentID := stringField(value["attachment_id"]) + if attachmentID == "" { + fmt.Fprintln(os.Stderr, "terminal.open did not return attachment_id") + return 1 + } + if !quiet { + fmt.Println(sessionID) + } + if _, err := callJSONRPCValue(socketPath, "session.detach", map[string]any{ + "session_id": sessionID, + "attachment_id": attachmentID, + }); err != nil { + fmt.Fprintln(os.Stderr, err) + return 1 + } + if detached { + return 0 + } + return sessionAttach(socketPath, sessionID) +} + +func sessionAttach(socketPath, sessionID string) int { + 
attachmentID := fmt.Sprintf("cli-%d-%d", os.Getpid(), time.Now().Unix()) + cols, rows := currentTerminalSize() + if _, err := callJSONRPCValue(socketPath, "session.attach", map[string]any{ + "session_id": sessionID, + "attachment_id": attachmentID, + "cols": cols, + "rows": rows, + }); err != nil { + fmt.Fprintln(os.Stderr, err) + return 1 + } + attached := true + defer func() { + if !attached { + return + } + _, _ = callJSONRPCValue(socketPath, "session.detach", map[string]any{ + "session_id": sessionID, + "attachment_id": attachmentID, + }) + }() + + fd := int(os.Stdin.Fd()) + oldState, err := makeRaw(fd) + if err != nil { + fmt.Fprintln(os.Stderr, err) + return 1 + } + defer func() { + _ = restoreTerminal(fd, oldState) + }() + + var stop atomic.Bool + done := make(chan struct{}) + + winch := make(chan os.Signal, 1) + signal.Notify(winch, syscall.SIGWINCH) + defer signal.Stop(winch) + + go func() { + for { + select { + case <-done: + return + case <-winch: + if stop.Load() { + return + } + cols, rows := currentTerminalSize() + _, _ = callJSONRPCValue(socketPath, "session.resize", map[string]any{ + "session_id": sessionID, + "attachment_id": attachmentID, + "cols": cols, + "rows": rows, + }) + } + } + }() + + go func() { + defer close(done) + var offset uint64 + for !stop.Load() { + result, err := callJSONRPCValue(socketPath, "terminal.read", map[string]any{ + "session_id": sessionID, + "offset": offset, + "max_bytes": 32 * 1024, + "timeout_ms": 200, + }) + if err != nil { + if strings.Contains(err.Error(), "deadline_exceeded") || strings.Contains(err.Error(), "terminal read timed out") { + continue + } + return + } + value, ok := result.(map[string]any) + if !ok { + return + } + offset = uint64(intField(value["offset"])) + data, err := base64.StdEncoding.DecodeString(stringField(value["data"])) + if err == nil && len(data) > 0 { + _, _ = os.Stdout.Write(data) + } + if boolField(value["eof"]) { + return + } + } + }() + + buf := make([]byte, 1024) + for { + n, 
readErr := os.Stdin.Read(buf) + if n > 0 { + if bytes.IndexByte(buf[:n], 0x1c) >= 0 { + break + } + if _, err := callJSONRPCValue(socketPath, "terminal.write", map[string]any{ + "session_id": sessionID, + "data": base64.StdEncoding.EncodeToString(buf[:n]), + }); err != nil { + fmt.Fprintln(os.Stderr, err) + stop.Store(true) + <-done + return 1 + } + } + if errors.Is(readErr, io.EOF) { + break + } + if readErr != nil { + fmt.Fprintln(os.Stderr, readErr) + stop.Store(true) + <-done + return 1 + } + } + + stop.Store(true) + _, _ = callJSONRPCValue(socketPath, "session.detach", map[string]any{ + "session_id": sessionID, + "attachment_id": attachmentID, + }) + attached = false + <-done + return 0 +} + +func callJSONRPCValue(socketPath, method string, params map[string]any) (any, error) { + payload, err := socketRoundTripV2(socketPath, method, params, nil) + if err != nil { + return nil, err + } + var value any + if err := json.Unmarshal([]byte(payload), &value); err != nil { + return nil, err + } + return value, nil +} + +func currentTerminalSize() (int, int) { + var ws winsize + if err := ioctlWinsize(int(os.Stdin.Fd()), syscall.TIOCGWINSZ, &ws); err != nil { + return 80, 24 + } + width, height := int(ws.Col), int(ws.Row) + if width < 2 { + width = 2 + } + if height < 1 { + height = 1 + } + return width, height +} + +func intField(value any) int { + switch typed := value.(type) { + case float64: + return int(typed) + case int: + return typed + default: + return 0 + } +} + +func stringField(value any) string { + typed, _ := value.(string) + return typed +} + +func boolField(value any) bool { + typed, _ := value.(bool) + return typed +} + +type terminalState struct { + termios syscall.Termios +} + +type winsize struct { + Row uint16 + Col uint16 + Xpixel uint16 + Ypixel uint16 +} + +func makeRaw(fd int) (*terminalState, error) { + var termios syscall.Termios + if err := ioctlTermios(fd, ioctlReadTermiosRequest(), &termios); err != nil { + return nil, err + } + raw := 
termios + raw.Iflag &^= syscall.IGNBRK | syscall.BRKINT | syscall.PARMRK | syscall.ISTRIP | syscall.INLCR | syscall.IGNCR | syscall.ICRNL | syscall.IXON + raw.Oflag &^= syscall.OPOST + raw.Lflag &^= syscall.ECHO | syscall.ECHONL | syscall.ICANON | syscall.ISIG | syscall.IEXTEN + raw.Cflag &^= syscall.CSIZE | syscall.PARENB + raw.Cflag |= syscall.CS8 + raw.Cc[syscall.VMIN] = 1 + raw.Cc[syscall.VTIME] = 0 + if err := ioctlTermios(fd, ioctlWriteTermiosRequest(), &raw); err != nil { + return nil, err + } + return &terminalState{termios: termios}, nil +} + +func restoreTerminal(fd int, state *terminalState) error { + if state == nil { + return nil + } + return ioctlTermios(fd, ioctlWriteTermiosRequest(), &state.termios) +} + +func ioctlTermios(fd int, request uintptr, value *syscall.Termios) error { + _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, uintptr(fd), request, uintptr(unsafe.Pointer(value))) + if errno != 0 { + return errno + } + return nil +} + +func ioctlWinsize(fd int, request uintptr, value *winsize) error { + _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, uintptr(fd), request, uintptr(unsafe.Pointer(value))) + if errno != 0 { + return errno + } + return nil +} + +func sessionUsage() { + fmt.Fprintln(os.Stderr, "Usage:") + fmt.Fprintln(os.Stderr, " cmuxd-remote session ls|list [--socket <path>]") + fmt.Fprintln(os.Stderr, " cmuxd-remote session attach|status|history|kill <name> [--socket <path>]") + fmt.Fprintln(os.Stderr, " cmuxd-remote session new <name> [--socket <path>] [--detached] [--quiet] [-- <command>]") + fmt.Fprintln(os.Stderr, "Defaults:") + fmt.Fprintln(os.Stderr, " --socket defaults to $CMUXD_UNIX_PATH when set.") +} diff --git a/daemon/remote/compat/direct_tls_compat_test.go b/daemon/remote/compat/direct_tls_compat_test.go index 7dbbfb960..2f4145c85 100644 --- a/daemon/remote/compat/direct_tls_compat_test.go +++ b/daemon/remote/compat/direct_tls_compat_test.go @@ -1,7 +1,9 @@ package compat import ( + "bufio" "encoding/base64" + 
"encoding/json" "testing" "time" @@ -328,6 +330,144 @@ func TestDirectTLSValidAttachTicketCanAttachQueryStatusAndDetach(t *testing.T) { } } +func TestDirectTLSRejectsUnscopedAttachTickets(t *testing.T) { + t.Parallel() + + server := startTLSServer(t, daemonBinary(t)) + openToken, err := auth.SignTicket(auth.TicketClaims{ + ServerID: server.ServerID, + Capabilities: []string{"session.open"}, + ExpiresAt: time.Now().Add(time.Minute).Unix(), + Nonce: "unscoped-open-nonce", + }, server.TicketSecret) + if err != nil { + t.Fatalf("sign open ticket: %v", err) + } + + openConn := dialTLSServer(t, server) + handshake := writeAndReadJSON(t, openConn, map[string]any{ + "ticket": openToken, + }) + if ok, _ := handshake["ok"].(bool); !ok { + t.Fatalf("open handshake should succeed: %+v", handshake) + } + openResp := writeAndReadJSON(t, openConn, map[string]any{ + "id": 1, + "method": "terminal.open", + "params": map[string]any{ + "command": "cat", + "cols": 120, + "rows": 40, + }, + }) + if ok, _ := openResp["ok"].(bool); !ok { + t.Fatalf("terminal.open should succeed: %+v", openResp) + } + sessionID := openResp["result"].(map[string]any)["session_id"].(string) + _ = openConn.Close() + + attachToken, err := auth.SignTicket(auth.TicketClaims{ + ServerID: server.ServerID, + Capabilities: []string{"session.attach"}, + ExpiresAt: time.Now().Add(time.Minute).Unix(), + Nonce: "unscoped-attach-nonce", + }, server.TicketSecret) + if err != nil { + t.Fatalf("sign attach ticket: %v", err) + } + + conn := dialTLSServer(t, server) + defer conn.Close() + + attachHandshake := writeAndReadJSON(t, conn, map[string]any{ + "ticket": attachToken, + }) + if ok, _ := attachHandshake["ok"].(bool); !ok { + t.Fatalf("attach handshake should succeed: %+v", attachHandshake) + } + + attachResp := writeAndReadJSON(t, conn, map[string]any{ + "id": 2, + "method": "session.attach", + "params": map[string]any{ + "session_id": sessionID, + "attachment_id": "cli-unscoped", + "cols": 100, + "rows": 30, + }, + }) 
+ if ok, _ := attachResp["ok"].(bool); ok { + t.Fatalf("unscoped attach ticket should fail: %+v", attachResp) + } + errObj := attachResp["error"].(map[string]any) + if got := errObj["message"].(string); got != "direct session.attach tickets require session and attachment scope" { + t.Fatalf("session.attach error = %q, want scope failure", got) + } +} + +func TestDirectTLSKeepsPipelinedRequestAfterHandshake(t *testing.T) { + t.Parallel() + + server := startTLSServer(t, daemonBinary(t)) + token, err := auth.SignTicket(auth.TicketClaims{ + ServerID: server.ServerID, + Capabilities: []string{"session.open"}, + ExpiresAt: time.Now().Add(time.Minute).Unix(), + Nonce: "pipelined-open-nonce", + }, server.TicketSecret) + if err != nil { + t.Fatalf("sign open ticket: %v", err) + } + + conn := dialTLSServer(t, server) + defer conn.Close() + reader := bufio.NewReader(conn) + if err := conn.SetDeadline(time.Now().Add(3 * time.Second)); err != nil { + t.Fatalf("set conn deadline: %v", err) + } + + handshakePayload, err := json.Marshal(map[string]any{"ticket": token}) + if err != nil { + t.Fatalf("marshal handshake: %v", err) + } + requestPayload, err := json.Marshal(map[string]any{ + "id": 1, + "method": "hello", + "params": map[string]any{}, + }) + if err != nil { + t.Fatalf("marshal request: %v", err) + } + + if _, err := conn.Write(append(append(handshakePayload, '\n'), append(requestPayload, '\n')...)); err != nil { + t.Fatalf("write pipelined payloads: %v", err) + } + + line, err := reader.ReadString('\n') + if err != nil { + t.Fatalf("read handshake response: %v", err) + } + var handshakeResp map[string]any + if err := json.Unmarshal([]byte(line), &handshakeResp); err != nil { + t.Fatalf("decode handshake response %q: %v", line, err) + } + if ok, _ := handshakeResp["ok"].(bool); !ok { + t.Fatalf("handshake should succeed: %+v", handshakeResp) + } + + line, err = reader.ReadString('\n') + if err != nil { + t.Fatalf("read pipelined response: %v", err) + } + var requestResp 
map[string]any + if err := json.Unmarshal([]byte(line), &requestResp); err != nil { + t.Fatalf("decode pipelined response %q: %v", line, err) + } + if ok, _ := requestResp["ok"].(bool); !ok { + t.Fatalf("pipelined hello should succeed: %+v", requestResp) + } +} + func TestDirectTLSOpenTicketRejectsSecondTerminalOpen(t *testing.T) { t.Parallel() diff --git a/daemon/remote/compat/harness_test.go b/daemon/remote/compat/harness_test.go index 28bb2b0db..17a172de4 100644 --- a/daemon/remote/compat/harness_test.go +++ b/daemon/remote/compat/harness_test.go @@ -29,6 +29,12 @@ var ( buildOnce sync.Once builtBinaryPath string buildBinaryErr error + daemonRegistry = struct { + sync.Mutex + byEndpoint map[string]func() string + }{ + byEndpoint: map[string]func() string{}, + } ) func daemonBinary(t *testing.T) string { @@ -39,18 +45,14 @@ func daemonBinary(t *testing.T) string { } buildOnce.Do(func() { - outputDir, err := os.MkdirTemp("", "cmuxd-remote-go-*") - if err != nil { - buildBinaryErr = err - return - } - builtBinaryPath = filepath.Join(outputDir, "cmuxd-remote-go") - - cmd := exec.Command("go", "build", "-ldflags", "-linkmode=external", "-o", builtBinaryPath, "./cmd/cmuxd-remote") + builtBinaryPath = filepath.Join(daemonRemoteRoot(), "rust", "target", "debug", "cmuxd-remote") + repoRoot := filepath.Clean(filepath.Join(daemonRemoteRoot(), "../..")) + cmd := exec.Command("cargo", "build", "--manifest-path", "./rust/Cargo.toml") cmd.Dir = daemonRemoteRoot() + cmd.Env = append(os.Environ(), "GHOSTTY_SOURCE_DIR="+filepath.Join(repoRoot, "ghostty")) output, err := cmd.CombinedOutput() if err != nil { - buildBinaryErr = fmt.Errorf("go build failed: %w\n%s", err, strings.TrimSpace(string(output))) + buildBinaryErr = fmt.Errorf("cargo build failed: %w\n%s", err, strings.TrimSpace(string(output))) return } }) @@ -262,8 +264,7 @@ func compatPackageDir() string { type unixDaemonServer struct { SocketPath string - cmd *exec.Cmd - stderr *bytes.Buffer + process daemonProcess } func 
startUnixDaemon(t *testing.T, bin string) string { @@ -284,30 +285,75 @@ func startUnixDaemon(t *testing.T, bin string) string { socketPath := filepath.Join(socketDir, "s.sock") server := &unixDaemonServer{ SocketPath: socketPath, - stderr: &bytes.Buffer{}, } - server.cmd = exec.Command( + cmd := exec.Command( bin, "serve", "--unix", "--socket", server.SocketPath, ) - server.cmd.Dir = daemonRemoteRoot() - server.cmd.Stderr = server.stderr + cmd.Dir = daemonRemoteRoot() + server.process.start(t, cmd, "start unix daemon") + registerDaemonDiagnostics(server.SocketPath, server.process.diagnostics) + + t.Cleanup(func() { + unregisterDaemonDiagnostics(server.SocketPath) + server.process.stop() + }) + + waitForUnixSocket(t, server) + return server.SocketPath +} - if err := server.cmd.Start(); err != nil { - t.Fatalf("start unix daemon: %v", err) +func startUnixDaemonWithWS(t *testing.T, bin string, wsSecret string) (socketPath string, wsAddr string) { + t.Helper() + + socketDir, err := os.MkdirTemp("", "cmuxd-unix-") + if err != nil { + t.Fatalf("mkdir temp socket dir: %v", err) + } + shortDir := filepath.Join(os.TempDir(), filepath.Base(socketDir)) + if renameErr := os.Rename(socketDir, shortDir); renameErr == nil { + socketDir = shortDir } + t.Cleanup(func() { + _ = os.RemoveAll(socketDir) + }) + + socketPath = filepath.Join(socketDir, "s.sock") + wsAddr = freeTCPAddress(t) + _, wsPort, err := net.SplitHostPort(wsAddr) + if err != nil { + t.Fatalf("split websocket addr %q: %v", wsAddr, err) + } + + server := &unixDaemonServer{ + SocketPath: socketPath, + } + cmd := exec.Command( + bin, + "serve", + "--unix", + "--socket", server.SocketPath, + "--ws-port", wsPort, + "--ws-secret", wsSecret, + ) + cmd.Dir = daemonRemoteRoot() + server.process.start(t, cmd, "start unix daemon with websocket") + registerDaemonDiagnostics(server.SocketPath, server.process.diagnostics) t.Cleanup(func() { - if server.cmd.Process != nil { - _ = server.cmd.Process.Kill() - } - _ = 
server.cmd.Wait() + unregisterDaemonDiagnostics(server.SocketPath) + server.process.stop() }) waitForUnixSocket(t, server) - return server.SocketPath + registerDaemonDiagnostics(wsAddr, server.process.diagnostics) + t.Cleanup(func() { + unregisterDaemonDiagnostics(wsAddr) + }) + waitForTCPServer(t, wsAddr, &server.process.stderr) + return socketPath, wsAddr } type unixJSONRPCClient struct { @@ -320,7 +366,7 @@ func newUnixJSONRPCClient(t *testing.T, socketPath string) *unixJSONRPCClient { conn, err := net.Dial("unix", socketPath) if err != nil { - t.Fatalf("dial unix socket %s: %v", socketPath, err) + t.Fatalf("dial unix socket %s: %v%s", socketPath, err, daemonDiagnosticsForEndpoint(socketPath)) } t.Cleanup(func() { _ = conn.Close() @@ -352,8 +398,7 @@ type tlsDaemonServer struct { ServerID string TicketSecret []byte - cmd *exec.Cmd - stderr *bytes.Buffer + process daemonProcess } func startTLSServer(t *testing.T, bin string) *tlsDaemonServer { @@ -367,9 +412,8 @@ func startTLSServer(t *testing.T, bin string) *tlsDaemonServer { Addr: addr, ServerID: "cmux-macmini", TicketSecret: []byte("compat-secret"), - stderr: &bytes.Buffer{}, } - server.cmd = exec.Command( + cmd := exec.Command( bin, "serve", "--tls", @@ -379,18 +423,13 @@ func startTLSServer(t *testing.T, bin string) *tlsDaemonServer { "--cert-file", certFile, "--key-file", keyFile, ) - server.cmd.Dir = daemonRemoteRoot() - server.cmd.Stderr = server.stderr - - if err := server.cmd.Start(); err != nil { - t.Fatalf("start tls daemon: %v", err) - } + cmd.Dir = daemonRemoteRoot() + server.process.start(t, cmd, "start tls daemon") + registerDaemonDiagnostics(server.Addr, server.process.diagnostics) t.Cleanup(func() { - if server.cmd.Process != nil { - _ = server.cmd.Process.Kill() - } - _ = server.cmd.Wait() + unregisterDaemonDiagnostics(server.Addr) + server.process.stop() }) waitForTLSServer(t, server) @@ -440,7 +479,7 @@ func dialTLSServer(t *testing.T, server *tlsDaemonServer) *tls.Conn { InsecureSkipVerify: 
true, }) if err != nil { - t.Fatalf("dial tls server %s: %v\nstderr:\n%s", server.Addr, err, server.stderr.String()) + t.Fatalf("dial tls server %s: %v%s", server.Addr, err, server.process.diagnostics()) } return conn } @@ -467,7 +506,7 @@ func writeAndReadJSONWithReader(t *testing.T, conn net.Conn, reader *bufio.Reade line, err := reader.ReadString('\n') if err != nil { - t.Fatalf("read response: %v", err) + t.Fatalf("read response: %v%s", err, daemonDiagnosticsForConn(conn)) } var response map[string]any @@ -492,7 +531,7 @@ func waitForUnixSocket(t *testing.T, server *unixDaemonServer) { err = dialErr } if time.Now().After(deadline) { - t.Fatalf("unix daemon did not start on %s: %v\nstderr:\n%s", server.SocketPath, err, server.stderr.String()) + t.Fatalf("unix daemon did not start on %s: %v%s", server.SocketPath, err, server.process.diagnostics()) } time.Sleep(20 * time.Millisecond) } @@ -512,12 +551,127 @@ func waitForTLSServer(t *testing.T, server *tlsDaemonServer) { return } if time.Now().After(deadline) { - t.Fatalf("tls daemon did not start on %s: %v\nstderr:\n%s", server.Addr, err, server.stderr.String()) + t.Fatalf("tls daemon did not start on %s: %v%s", server.Addr, err, server.process.diagnostics()) + } + time.Sleep(20 * time.Millisecond) + } +} + +func waitForTCPServer(t *testing.T, addr string, stderr *bytes.Buffer) { + t.Helper() + + deadline := time.Now().Add(3 * time.Second) + for { + conn, err := net.DialTimeout("tcp", addr, 100*time.Millisecond) + if err == nil { + _ = conn.Close() + return + } + if time.Now().After(deadline) { + t.Fatalf("tcp server did not start on %s: %v\nstderr:\n%s", addr, err, stderr.String()) } time.Sleep(20 * time.Millisecond) } } +type daemonProcess struct { + cmd *exec.Cmd + stderr bytes.Buffer + + mu sync.Mutex + exited bool + exitErr error + done chan struct{} +} + +func (p *daemonProcess) start(t *testing.T, cmd *exec.Cmd, startMessage string) { + t.Helper() + + p.cmd = cmd + p.done = make(chan struct{}) + 
p.cmd.Stderr = &p.stderr + + if err := p.cmd.Start(); err != nil { + t.Fatalf("%s: %v", startMessage, err) + } + + go func() { + err := p.cmd.Wait() + p.mu.Lock() + p.exited = true + p.exitErr = err + p.mu.Unlock() + close(p.done) + }() +} + +func (p *daemonProcess) stop() { + if p == nil || p.cmd == nil { + return + } + if p.cmd.Process != nil { + _ = p.cmd.Process.Kill() + } + if p.done != nil { + <-p.done + } +} + +func (p *daemonProcess) diagnostics() string { + if p == nil { + return "" + } + + p.mu.Lock() + exited := p.exited + exitErr := p.exitErr + p.mu.Unlock() + + stderr := strings.TrimSpace(p.stderr.String()) + var details []string + if exited { + details = append(details, fmt.Sprintf("process exit: %v", exitErr)) + } else { + details = append(details, "process state: still running") + } + if stderr != "" { + details = append(details, fmt.Sprintf("stderr:\n%s", stderr)) + } + if len(details) == 0 { + return "" + } + return "\n" + strings.Join(details, "\n") +} + +func registerDaemonDiagnostics(endpoint string, fn func() string) { + daemonRegistry.Lock() + defer daemonRegistry.Unlock() + daemonRegistry.byEndpoint[endpoint] = fn +} + +func unregisterDaemonDiagnostics(endpoint string) { + daemonRegistry.Lock() + defer daemonRegistry.Unlock() + delete(daemonRegistry.byEndpoint, endpoint) +} + +func daemonDiagnosticsForConn(conn net.Conn) string { + if conn == nil { + return "" + } + return daemonDiagnosticsForEndpoint(conn.RemoteAddr().String()) +} + +func daemonDiagnosticsForEndpoint(endpoint string) string { + daemonRegistry.Lock() + fn := daemonRegistry.byEndpoint[endpoint] + daemonRegistry.Unlock() + if fn == nil { + return "" + } + return fn() +} + func freeTCPAddress(t *testing.T) string { t.Helper() diff --git a/daemon/remote/compat/poll_fd_darwin_test.go b/daemon/remote/compat/poll_fd_darwin_test.go new file mode 100644 index 000000000..36a902f14 --- /dev/null +++ b/daemon/remote/compat/poll_fd_darwin_test.go @@ -0,0 +1,22 @@ +//go:build darwin + 
+package compat + +import ( + "syscall" + "time" +) + +func pollPTYReadable(fd int, timeout time.Duration) (bool, error) { + var readFDs syscall.FdSet + readFDs.Bits[fd/32] |= 1 << (uint(fd) % 32) + tv := syscall.Timeval{ + Sec: int64(timeout / time.Second), + Usec: int32((timeout % time.Second) / time.Microsecond), + } + err := syscall.Select(fd+1, &readFDs, nil, nil, &tv) + if err != nil { + return false, err + } + return readFDs.Bits[fd/32]&(1<<(uint(fd)%32)) != 0, nil +} diff --git a/daemon/remote/compat/poll_fd_linux_test.go b/daemon/remote/compat/poll_fd_linux_test.go new file mode 100644 index 000000000..f5e04686e --- /dev/null +++ b/daemon/remote/compat/poll_fd_linux_test.go @@ -0,0 +1,22 @@ +//go:build linux + +package compat + +import ( + "syscall" + "time" +) + +func pollPTYReadable(fd int, timeout time.Duration) (bool, error) { + var readFDs syscall.FdSet + readFDs.Bits[fd/64] |= 1 << (uint(fd) % 64) + tv := syscall.Timeval{ + Sec: int64(timeout / time.Second), + Usec: int64((timeout % time.Second) / time.Microsecond), + } + ready, err := syscall.Select(fd+1, &readFDs, nil, nil, &tv) + if err != nil { + return false, err + } + return ready > 0, nil +} diff --git a/daemon/remote/compat/session_attach_pty_test.go b/daemon/remote/compat/session_attach_pty_test.go index 720dc2412..9d8f9f7f7 100644 --- a/daemon/remote/compat/session_attach_pty_test.go +++ b/daemon/remote/compat/session_attach_pty_test.go @@ -6,6 +6,7 @@ import ( "os/exec" "strconv" "strings" + "syscall" "testing" "time" @@ -34,7 +35,7 @@ func TestSessionAttachRoundTripAndReattach(t *testing.T) { } writePTY(t, ptmx, "\x1c") - _ = cmd.Wait() + waitForCommandExit(t, cmd, 5*time.Second) second := exec.Command(bin, "session", "attach", "dev", "--socket", socketPath) second.Dir = daemonRemoteRoot() @@ -96,17 +97,13 @@ func TestSessionAttachZshLoginShellStaysAlive(t *testing.T) { } writePTY(t, ptmx, "\x1c") - if err := attach.Wait(); err != nil { - t.Fatalf("detach attach session: %v\n%s", err, 
buf.String()) - } + waitForCommandExit(t, attach, 5*time.Second) if strings.Contains(buf.String(), "UnexpectedEndOfInput") { t.Fatalf("attach output contains daemon crash marker: %q", buf.String()) } } func TestSessionAttachPropagatesPTYResize(t *testing.T) { - t.Parallel() - bin := daemonBinary(t) socketPath := startUnixDaemon(t, bin) @@ -153,20 +150,13 @@ func TestSessionAttachPropagatesPTYResize(t *testing.T) { waitForSessionSize(t, bin, socketPath, "resize-dev", 132, 43, 3*time.Second) - if err := pty.Setsize(ptmx, &pty.Winsize{Cols: 90, Rows: 43}); err != nil { - t.Fatalf("pty setsize width-only: %v", err) - } - waitForSessionSize(t, bin, socketPath, "resize-dev", 90, 43, 3*time.Second) - if err := pty.Setsize(ptmx, &pty.Winsize{Cols: 90, Rows: 20}); err != nil { - t.Fatalf("pty setsize height-only: %v", err) + t.Fatalf("pty setsize shrink-both: %v", err) } waitForSessionSize(t, bin, socketPath, "resize-dev", 90, 20, 3*time.Second) writePTY(t, ptmx, "\x1c") - if err := cmd.Wait(); err != nil { - t.Fatalf("detach attach session: %v", err) - } + waitForCommandExit(t, cmd, 5*time.Second) } func TestSessionAttachSmallestLiveClientWinsAcrossMultipleAttachments(t *testing.T) { @@ -462,12 +452,12 @@ func writePTY(t *testing.T, ptmx *os.File, text string) { func readUntilContains(t *testing.T, ptmx *os.File, want string, timeout time.Duration) string { t.Helper() + ensurePTYNonblocking(t, ptmx) deadline := time.Now().Add(timeout) var out strings.Builder buf := make([]byte, 4096) for time.Now().Before(deadline) { - _ = ptmx.SetReadDeadline(time.Now().Add(200 * time.Millisecond)) - n, err := ptmx.Read(buf) + n, err := readPTYChunk(ptmx, buf) if n > 0 { out.Write(buf[:n]) if strings.Contains(out.String(), want) { @@ -475,15 +465,39 @@ func readUntilContains(t *testing.T, ptmx *os.File, want string, timeout time.Du } } if err != nil { - if n == 0 { - continue - } + t.Fatalf("read pty: %v", err) } + time.Sleep(20 * time.Millisecond) } return out.String() } +func 
ensurePTYNonblocking(t *testing.T, ptmx *os.File) { + t.Helper() + if err := syscall.SetNonblock(int(ptmx.Fd()), true); err != nil { + t.Fatalf("set pty nonblocking: %v", err) + } +} + +func readPTYChunk(ptmx *os.File, buf []byte) (int, error) { + ready, err := pollPTYReadable(int(ptmx.Fd()), 20*time.Millisecond) + if err != nil { + if err == syscall.EINTR { + return 0, nil + } + return 0, err + } + if !ready { + return 0, nil + } + n, err := syscall.Read(int(ptmx.Fd()), buf) + if err == nil || err == syscall.EAGAIN || err == syscall.EWOULDBLOCK || err == syscall.EINTR { + return n, nil + } + return n, err +} + func waitForSessionSize(t *testing.T, bin, socketPath, sessionID string, cols, rows int, timeout time.Duration) { t.Helper() diff --git a/daemon/remote/compat/session_attach_tui_test.go b/daemon/remote/compat/session_attach_tui_test.go new file mode 100644 index 000000000..dc835caf3 --- /dev/null +++ b/daemon/remote/compat/session_attach_tui_test.go @@ -0,0 +1,134 @@ +package compat + +import ( + "os" + "os/exec" + "strings" + "testing" + "time" + + "github.com/creack/pty" +) + +func TestSessionAttachTUIResizeAndReattach(t *testing.T) { + if _, err := exec.LookPath("python3"); err != nil { + t.Skip("python3 not available") + } + + bin := daemonBinary(t) + socketPath := startUnixDaemon(t, bin) + client := newUnixJSONRPCClient(t, socketPath) + defer func() { + if err := client.Close(); err != nil { + t.Fatalf("close unix client: %v", err) + } + }() + + open := client.Call(t, map[string]any{ + "id": "1", + "method": "terminal.open", + "params": map[string]any{ + "session_id": "tui-attach", + "command": "/usr/bin/env python3 -u " + fixturePath(t, "fake_tui.py"), + "cols": 80, + "rows": 24, + }, + }) + if ok, _ := open["ok"].(bool); !ok { + t.Fatalf("terminal.open should succeed: %+v", open) + } + result := open["result"].(map[string]any) + attachmentID := result["attachment_id"].(string) + + detach := client.Call(t, map[string]any{ + "id": "2", + "method": 
"session.detach", + "params": map[string]any{ + "session_id": "tui-attach", + "attachment_id": attachmentID, + }, + }) + if ok, _ := detach["ok"].(bool); !ok { + t.Fatalf("session.detach should succeed: %+v", detach) + } + + cmd := exec.Command(bin, "session", "attach", "tui-attach", "--socket", socketPath) + cmd.Dir = daemonRemoteRoot() + ptmx, err := pty.StartWithSize(cmd, &pty.Winsize{Cols: 80, Rows: 24}) + if err != nil { + t.Fatalf("pty start attach: %v", err) + } + defer ptmx.Close() + + output := readUntilContainsAll(t, ptmx, 3*time.Second, "FAKE-TUI 24 80", "Press q to quit") + if !containsAll(output, "FAKE-TUI 24 80", "Press q to quit") { + t.Fatalf("initial tui attach output missing expected markers: %q", output) + } + + writePTY(t, ptmx, "abc") + output = readUntilContains(t, ptmx, "INPUT abc", 3*time.Second) + if !containsAll(output, "INPUT abc") { + t.Fatalf("tui attach output missing typed input: %q", output) + } + + if err := pty.Setsize(ptmx, &pty.Winsize{Cols: 91, Rows: 31}); err != nil { + t.Fatalf("pty setsize: %v", err) + } + waitForSessionSize(t, bin, socketPath, "tui-attach", 91, 31, 3*time.Second) + output = readUntilContainsAll(t, ptmx, 3*time.Second, "FAKE-TUI 31 91", "INPUT abc") + if !containsAll(output, "FAKE-TUI 31 91", "INPUT abc") { + t.Fatalf("tui resize did not repaint expected markers: %q", output) + } + + writePTY(t, ptmx, "\x1c") + waitForCommandExit(t, cmd, 5*time.Second) + + second := exec.Command(bin, "session", "attach", "tui-attach", "--socket", socketPath) + second.Dir = daemonRemoteRoot() + ptmx2, err := pty.StartWithSize(second, &pty.Winsize{Cols: 91, Rows: 31}) + if err != nil { + t.Fatalf("pty start reattach: %v", err) + } + defer ptmx2.Close() + + output = readUntilContainsAll(t, ptmx2, 3*time.Second, "FAKE-TUI 31 91", "INPUT abc") + if !containsAll(output, "FAKE-TUI 31 91", "INPUT abc") { + t.Fatalf("reattach output missing expected markers: %q", output) + } + + writePTY(t, ptmx2, "q\n") + waitForCommandExit(t, 
second, 5*time.Second) +} + +func containsAll(haystack string, needles ...string) bool { + for _, needle := range needles { + if !strings.Contains(haystack, needle) { + return false + } + } + return true +} + +func readUntilContainsAll(t *testing.T, ptmx *os.File, timeout time.Duration, needles ...string) string { + t.Helper() + + ensurePTYNonblocking(t, ptmx) + deadline := time.Now().Add(timeout) + var out strings.Builder + buf := make([]byte, 4096) + for time.Now().Before(deadline) { + n, err := readPTYChunk(ptmx, buf) + if n > 0 { + out.Write(buf[:n]) + if containsAll(out.String(), needles...) { + return out.String() + } + } + if err != nil { + t.Fatalf("read pty: %v", err) + } + time.Sleep(20 * time.Millisecond) + } + + return out.String() +} diff --git a/daemon/remote/compat/session_cli_compat_test.go b/daemon/remote/compat/session_cli_compat_test.go index d7caa49e8..5ac2c6749 100644 --- a/daemon/remote/compat/session_cli_compat_test.go +++ b/daemon/remote/compat/session_cli_compat_test.go @@ -322,6 +322,88 @@ func TestSessionCLIListShowsMultipleAttachments(t *testing.T) { } } +func TestSessionCLIAttachDetachesIfRawModeSetupFails(t *testing.T) { + t.Parallel() + + bin := daemonBinary(t) + socketPath := startUnixDaemon(t, bin) + openAndSeedCatSession(t, socketPath, "attach-cleanup", "") + + attachCmd := exec.Command(bin, "session", "attach", "attach-cleanup", "--socket", socketPath) + attachCmd.Dir = daemonRemoteRoot() + attachOutput, err := attachCmd.CombinedOutput() + if err == nil { + t.Fatalf("session attach without a tty should fail, output=%s", attachOutput) + } + + client := newUnixJSONRPCClient(t, socketPath) + defer func() { + if err := client.Close(); err != nil { + t.Fatalf("close unix client: %v", err) + } + }() + + status := client.Call(t, map[string]any{ + "id": "1", + "method": "session.status", + "params": map[string]any{ + "session_id": "attach-cleanup", + }, + }) + if ok, _ := status["ok"].(bool); !ok { + t.Fatalf("session.status should 
succeed: %+v", status) + } + attachments := status["result"].(map[string]any)["attachments"].([]any) + if len(attachments) != 1 { + t.Fatalf("expected only the bootstrap attachment after failed attach, got %+v", attachments) + } + attachmentID := attachments[0].(map[string]any)["attachment_id"].(string) + if strings.HasPrefix(attachmentID, "cli-") { + t.Fatalf("failed attach left a cli attachment behind: %+v", attachments) + } +} + +func TestSessionCLIAttachExitsWhenRemotePaneHasReachedEOF(t *testing.T) { + t.Parallel() + + bin := daemonBinary(t) + socketPath := startUnixDaemon(t, bin) + client := newUnixJSONRPCClient(t, socketPath) + defer func() { + if err := client.Close(); err != nil { + t.Fatalf("close unix client: %v", err) + } + }() + + open := client.Call(t, map[string]any{ + "id": "1", + "method": "terminal.open", + "params": map[string]any{ + "session_id": "attach-exit", + "command": "printf DONE", + "cols": 80, + "rows": 24, + }, + }) + if ok, _ := open["ok"].(bool); !ok { + t.Fatalf("terminal.open should succeed: %+v", open) + } + + cmd := exec.Command(bin, "session", "attach", "attach-exit", "--socket", socketPath) + cmd.Dir = daemonRemoteRoot() + ptmx, err := pty.StartWithSize(cmd, &pty.Winsize{Cols: 90, Rows: 24}) + if err != nil { + t.Fatalf("pty start session attach: %v", err) + } + defer ptmx.Close() + + output := readUntilContains(t, ptmx, "DONE", 3*time.Second) + if !strings.Contains(output, "DONE") { + t.Fatalf("session attach output missing DONE: %q", output) + } + waitForCommandExit(t, cmd, 5*time.Second) +} + func openAndSeedCatSession(t *testing.T, socketPath, sessionID, text string) { t.Helper() diff --git a/daemon/remote/compat/testdata/fake_tui.py b/daemon/remote/compat/testdata/fake_tui.py new file mode 100644 index 000000000..c61961b06 --- /dev/null +++ b/daemon/remote/compat/testdata/fake_tui.py @@ -0,0 +1,62 @@ +#!/usr/bin/env python3 +import os +import select +import signal +import sys +import termios +import tty + + +input_text = 
"" +needs_render = True +running = True + + +def render() -> None: + size = os.get_terminal_size(sys.stdin.fileno()) + sys.stdout.write("\x1b[H\x1b[2J") + sys.stdout.write(f"FAKE-TUI {size.lines} {size.columns}\n") + sys.stdout.write(f"INPUT {input_text}\n") + sys.stdout.write("Press q to quit\n") + sys.stdout.flush() + + +def on_winch(_signum, _frame) -> None: + global needs_render + needs_render = True + + +def on_exit() -> None: + sys.stdout.write("\x1b[?1049l\x1b[?25h") + sys.stdout.flush() + + +signal.signal(signal.SIGWINCH, on_winch) +sys.stdout.write("\x1b[?1049h\x1b[?25l") +sys.stdout.flush() +stdin_fd = sys.stdin.fileno() +saved_termios = termios.tcgetattr(stdin_fd) +tty.setraw(stdin_fd) + +try: + while running: + if needs_render: + needs_render = False + render() + + readable, _, _ = select.select([sys.stdin], [], [], 0.1) + if not readable: + continue + + chunk = os.read(sys.stdin.fileno(), 1) + if not chunk: + break + ch = chunk.decode("utf-8", errors="ignore") + if ch == "q": + break + if ch not in ("\r", "\n"): + input_text += ch + needs_render = True +finally: + termios.tcsetattr(stdin_fd, termios.TCSADRAIN, saved_termios) + on_exit() diff --git a/daemon/remote/compat/testdata/fake_tui.sh b/daemon/remote/compat/testdata/fake_tui.sh new file mode 100644 index 000000000..1ea3f4e4f --- /dev/null +++ b/daemon/remote/compat/testdata/fake_tui.sh @@ -0,0 +1,37 @@ +#!/usr/bin/env bash +set -euo pipefail + +input="" + +cleanup() { + printf '\033[?1049l\033[?25h' +} + +render() { + local size + size="$(stty size 2>/dev/null || echo '0 0')" + printf '\033[H\033[2J' + printf 'FAKE-TUI %s\n' "$size" + printf 'INPUT %s\n' "$input" + printf 'Press q to quit\n' +} + +trap cleanup EXIT +trap render WINCH + +printf '\033[?1049h\033[?25l' +render + +while IFS= read -r -n 1 ch; do + case "$ch" in + q) + break + ;; + $'\r'|$'\n') + ;; + *) + input="${input}${ch}" + ;; + esac + render +done diff --git a/daemon/remote/compat/testdata/ready_cat.sh 
b/daemon/remote/compat/testdata/ready_cat.sh new file mode 100644 index 000000000..9120fb413 --- /dev/null +++ b/daemon/remote/compat/testdata/ready_cat.sh @@ -0,0 +1,3 @@ +#!/bin/sh +printf 'READY\n' +exec cat diff --git a/daemon/remote/compat/testdata/ready_shell.sh b/daemon/remote/compat/testdata/ready_shell.sh new file mode 100644 index 000000000..42dff6a76 --- /dev/null +++ b/daemon/remote/compat/testdata/ready_shell.sh @@ -0,0 +1,4 @@ +#!/bin/sh +stty -echo +printf 'READY\n' +exec env PS1= /bin/sh diff --git a/daemon/remote/compat/testdata/respawned_cat.sh b/daemon/remote/compat/testdata/respawned_cat.sh new file mode 100644 index 000000000..2f14dab2d --- /dev/null +++ b/daemon/remote/compat/testdata/respawned_cat.sh @@ -0,0 +1,3 @@ +#!/bin/sh +printf 'respawned\n' +exec cat diff --git a/daemon/remote/compat/tmux_parity_test.go b/daemon/remote/compat/tmux_parity_test.go new file mode 100644 index 000000000..ebf23ce92 --- /dev/null +++ b/daemon/remote/compat/tmux_parity_test.go @@ -0,0 +1,485 @@ +package compat + +import ( + "encoding/json" + "fmt" + "os" + "os/exec" + "path/filepath" + "reflect" + "strings" + "testing" + "time" +) + +type tmuxCommandResult struct { + OK bool + Stdout string + Stderr string + ErrorCode string +} + +type tmuxBackend interface { + Name() string + Exec(args ...string) tmuxCommandResult +} + +type realTmuxBackend struct { + socketName string + tmpDir string +} + +func newRealTmuxBackend(t *testing.T) *realTmuxBackend { + t.Helper() + + if _, err := exec.LookPath("tmux"); err != nil { + t.Skip("tmux not available") + } + + backend := &realTmuxBackend{ + socketName: fmt.Sprintf("p%d", time.Now().UnixNano()), + tmpDir: shortTempDir(t, "tmux-parity-"), + } + t.Cleanup(func() { + _ = exec.Command("tmux", "-f", "/dev/null", "-L", backend.socketName, "kill-server").Run() + }) + return backend +} + +func (b *realTmuxBackend) Name() string { return "tmux" } + +func (b *realTmuxBackend) Exec(args ...string) tmuxCommandResult { + cmd := 
exec.Command("tmux", append([]string{"-f", "/dev/null", "-L", b.socketName}, args...)...) + cmd.Env = append(os.Environ(), "TMUX_TMPDIR="+b.tmpDir, "TERM=xterm-256color") + output, err := cmd.CombinedOutput() + result := tmuxCommandResult{ + OK: err == nil, + Stdout: normalizeText(string(output)), + } + if err == nil { + return result + } + result.Stderr = normalizeText(string(output)) + return result +} + +type cmuxTmuxBackend struct { + bin string + socketPath string + client *unixJSONRPCClient +} + +func newCmuxTmuxBackend(t *testing.T) *cmuxTmuxBackend { + t.Helper() + + bin := daemonBinary(t) + socketPath := startUnixDaemon(t, bin) + return &cmuxTmuxBackend{ + bin: bin, + socketPath: socketPath, + client: newUnixJSONRPCClient(t, socketPath), + } +} + +func (b *cmuxTmuxBackend) Name() string { return "cmuxd-remote" } + +func (b *cmuxTmuxBackend) Exec(args ...string) tmuxCommandResult { + argv := make([]any, 0, len(args)) + for _, arg := range args { + argv = append(argv, arg) + } + response, err := callUnixJSONRPCUnchecked(b.client, map[string]any{ + "id": "tmux", + "method": "tmux.exec", + "params": map[string]any{"argv": argv}, + }) + if err != nil { + return tmuxCommandResult{ + OK: false, + Stderr: normalizeText(err.Error()), + } + } + if ok, _ := response["ok"].(bool); !ok { + errPayload, _ := response["error"].(map[string]any) + return tmuxCommandResult{ + OK: false, + ErrorCode: stringValue(errPayload["code"]), + Stderr: normalizeText(stringValue(errPayload["message"])), + } + } + resultPayload, _ := response["result"].(map[string]any) + return tmuxCommandResult{ + OK: true, + Stdout: normalizeText(stringValue(resultPayload["stdout"])), + Stderr: normalizeText(stringValue(resultPayload["stderr"])), + } +} + +type tmuxWindowState struct { + Index string `json:"index"` + Name string `json:"name"` + Active string `json:"active"` + Panes []tmuxPaneState `json:"panes"` +} + +type tmuxPaneState struct { + Index string `json:"index"` + Active string 
`json:"active"` + Capture string `json:"capture"` +} + +type tmuxSessionState struct { + Windows []tmuxWindowState `json:"windows"` +} + +func TestTmuxParityCommonCommands(t *testing.T) { + t.Parallel() + + real := newRealTmuxBackend(t) + cmux := newCmuxTmuxBackend(t) + readyScript := fixturePath(t, "ready_cat.sh") + + mustBothSucceed(t, "new-session", real, cmux, "new-session", "-d", "-s", "parity", "-n", "alpha", "/bin/sh", readyScript) + waitForCaptureContains(t, real, "parity:0.0", "READY", 3*time.Second) + waitForCaptureContains(t, cmux, "parity:0.0", "READY", 3*time.Second) + assertSessionStateEqual(t, "after-new-session", real, cmux, "parity") + + assertBothOK(t, "has-session", real.Exec("has-session", "-t", "parity"), cmux.Exec("has-session", "-t", "parity")) + + mustBothSucceed(t, "send-keys-text", real, cmux, "send-keys", "-t", "parity:0.0", "-l", "parity-hello") + mustBothSucceed(t, "send-keys-enter", real, cmux, "send-keys", "-t", "parity:0.0", "Enter") + waitForCaptureContains(t, real, "parity:0.0", "parity-hello", 3*time.Second) + waitForCaptureContains(t, cmux, "parity:0.0", "parity-hello", 3*time.Second) + assertNormalizedStdoutEqual(t, "capture-pane", real.Exec("capture-pane", "-p", "-t", "parity:0.0", "-S", "-5"), cmux.Exec("capture-pane", "-p", "-t", "parity:0.0", "-S", "-5")) + + displayFormat := "#{session_name}|#{window_name}|#{window_index}|#{window_active}|#{pane_index}|#{pane_active}" + assertNormalizedStdoutEqual(t, "display-message", real.Exec("display-message", "-p", "-t", "parity:0.0", displayFormat), cmux.Exec("display-message", "-p", "-t", "parity:0.0", displayFormat)) + + mustBothSucceed(t, "new-window", real, cmux, "new-window", "-d", "-t", "parity", "-n", "beta", "/bin/sh", readyScript) + waitForCaptureContains(t, real, "parity:1.0", "READY", 3*time.Second) + waitForCaptureContains(t, cmux, "parity:1.0", "READY", 3*time.Second) + mustBothSucceed(t, "rename-window", real, cmux, "rename-window", "-t", "parity:1", "gamma") + 
assertNormalizedStdoutEqual(t, "list-windows", real.Exec("list-windows", "-t", "parity", "-F", "#{window_index}|#{window_name}|#{window_active}"), cmux.Exec("list-windows", "-t", "parity", "-F", "#{window_index}|#{window_name}|#{window_active}")) + + mustBothSucceed(t, "select-window", real, cmux, "select-window", "-t", "parity:1") + assertSessionStateEqual(t, "after-select-window", real, cmux, "parity") + mustBothSucceed(t, "last-window", real, cmux, "last-window", "-t", "parity") + assertSessionStateEqual(t, "after-last-window", real, cmux, "parity") + mustBothSucceed(t, "next-window", real, cmux, "next-window", "-t", "parity") + assertSessionStateEqual(t, "after-next-window", real, cmux, "parity") + mustBothSucceed(t, "previous-window", real, cmux, "previous-window", "-t", "parity") + assertSessionStateEqual(t, "after-previous-window", real, cmux, "parity") + + mustBothSucceed(t, "split-window", real, cmux, "split-window", "-d", "-t", "parity:0", "/bin/sh", readyScript) + waitForCaptureContains(t, real, "parity:0.1", "READY", 3*time.Second) + waitForCaptureContains(t, cmux, "parity:0.1", "READY", 3*time.Second) + assertNormalizedStdoutEqual(t, "list-panes", real.Exec("list-panes", "-t", "parity:0", "-F", "#{pane_index}|#{pane_active}"), cmux.Exec("list-panes", "-t", "parity:0", "-F", "#{pane_index}|#{pane_active}")) + + mustBothSucceed(t, "select-pane", real, cmux, "select-pane", "-t", "parity:0.1") + assertSessionStateEqual(t, "after-select-pane", real, cmux, "parity") + mustBothSucceed(t, "last-pane", real, cmux, "last-pane", "-t", "parity:0") + assertSessionStateEqual(t, "after-last-pane", real, cmux, "parity") + + mustBothSucceed(t, "set-buffer", real, cmux, "set-buffer", "-b", "clip", "clip-text") + assertNormalizedStdoutEqual(t, "show-buffer", real.Exec("show-buffer", "-b", "clip"), cmux.Exec("show-buffer", "-b", "clip")) + realSavePath := filepath.Join(t.TempDir(), "tmux-buffer.txt") + cmuxSavePath := filepath.Join(t.TempDir(), "cmux-buffer.txt") + 
assertResultOK(t, "save-buffer tmux", real.Exec("save-buffer", "-b", "clip", realSavePath)) + assertResultOK(t, "save-buffer cmux", cmux.Exec("save-buffer", "-b", "clip", cmuxSavePath)) + realSaved, err := os.ReadFile(realSavePath) + if err != nil { + t.Fatalf("read tmux save-buffer file: %v", err) + } + cmuxSaved, err := os.ReadFile(cmuxSavePath) + if err != nil { + t.Fatalf("read cmux save-buffer file: %v", err) + } + if string(realSaved) != string(cmuxSaved) { + recordDiffArtifacts(t, "save-buffer-file", string(realSaved), string(cmuxSaved)) + t.Fatalf("save-buffer file mismatch: tmux=%q cmux=%q", string(realSaved), string(cmuxSaved)) + } + assertListContains(t, "list-buffers tmux", real.Exec("list-buffers"), "clip") + assertListContains(t, "list-buffers cmux", cmux.Exec("list-buffers"), "clip") + + mustBothSucceed(t, "paste-buffer", real, cmux, "paste-buffer", "-b", "clip", "-t", "parity:0.0") + waitForCaptureContains(t, real, "parity:0.0", "clip-text", 3*time.Second) + waitForCaptureContains(t, cmux, "parity:0.0", "clip-text", 3*time.Second) + assertNormalizedStdoutEqual(t, "capture-after-paste", real.Exec("capture-pane", "-p", "-t", "parity:0.0", "-S", "-8"), cmux.Exec("capture-pane", "-p", "-t", "parity:0.0", "-S", "-8")) + + mustBothSucceed(t, "wait-for-signal", real, cmux, "wait-for", "-S", "parity-signal") + assertBothOK(t, "wait-for", real.Exec("wait-for", "parity-signal"), cmux.Exec("wait-for", "parity-signal")) + + assertBothOK(t, "find-window", real.Exec("find-window", "clip-text"), cmux.Exec("find-window", "clip-text")) + assertSessionStateEqual(t, "after-find-window", real, cmux, "parity") + + shellScript := fixturePath(t, "ready_shell.sh") + pipeWindowTmux := mustStdout(t, "new-window-pipe tmux", real.Exec("new-window", "-d", "-P", "-F", "#{window_index}", "-t", "parity", "-n", "pipe", "/bin/sh", shellScript)) + pipeWindowCmux := mustStdout(t, "new-window-pipe cmux", cmux.Exec("new-window", "-d", "-P", "-F", "#{window_index}", "-t", "parity", "-n", 
"pipe", "/bin/sh", shellScript)) + if pipeWindowTmux != pipeWindowCmux { + t.Fatalf("pipe window index mismatch: tmux=%q cmux=%q", pipeWindowTmux, pipeWindowCmux) + } + pipeTarget := "parity:" + pipeWindowTmux + ".0" + waitForCaptureContains(t, real, pipeTarget, "READY", 3*time.Second) + waitForCaptureContains(t, cmux, pipeTarget, "READY", 3*time.Second) + + pipePathTmux := filepath.Join(t.TempDir(), "pipe-tmux.txt") + pipePathCmux := filepath.Join(t.TempDir(), "pipe-cmux.txt") + assertResultOK(t, "pipe-pane tmux", real.Exec("pipe-pane", "-t", pipeTarget, "cat > "+pipePathTmux)) + assertResultOK(t, "pipe-pane cmux", cmux.Exec("pipe-pane", "-t", pipeTarget, "cat > "+pipePathCmux)) + mustBothSucceed(t, "send-keys-pipe-command", real, cmux, "send-keys", "-t", pipeTarget, "-l", "echo piped-line") + mustBothSucceed(t, "send-keys-pipe-enter", real, cmux, "send-keys", "-t", pipeTarget, "Enter") + waitForFileContains(t, pipePathTmux, "piped-line", 3*time.Second) + waitForFileContains(t, pipePathCmux, "piped-line", 3*time.Second) + pipeTmux, _ := os.ReadFile(pipePathTmux) + pipeCmux, _ := os.ReadFile(pipePathCmux) + if normalizeText(string(pipeTmux)) != normalizeText(string(pipeCmux)) { + recordDiffArtifacts(t, "pipe-pane-file", string(pipeTmux), string(pipeCmux)) + t.Fatalf("pipe-pane file mismatch: tmux=%q cmux=%q", string(pipeTmux), string(pipeCmux)) + } + + respawnScript := fixturePath(t, "respawned_cat.sh") + mustBothSucceed(t, "respawn-pane", real, cmux, "respawn-pane", "-k", "-t", "parity:0.0", "/bin/sh "+respawnScript) + waitForCaptureContains(t, real, "parity:0.0", "respawned", 3*time.Second) + waitForCaptureContains(t, cmux, "parity:0.0", "respawned", 3*time.Second) + assertNormalizedStdoutEqual(t, "capture-after-respawn", real.Exec("capture-pane", "-p", "-t", "parity:0.0", "-S", "-8"), cmux.Exec("capture-pane", "-p", "-t", "parity:0.0", "-S", "-8")) + + mustBothSucceed(t, "kill-pane", real, cmux, "kill-pane", "-t", "parity:0.1") + assertNormalizedStdoutEqual(t, 
"list-panes-after-kill", real.Exec("list-panes", "-t", "parity:0", "-F", "#{pane_index}|#{pane_active}"), cmux.Exec("list-panes", "-t", "parity:0", "-F", "#{pane_index}|#{pane_active}")) + + mustBothSucceed(t, "kill-window", real, cmux, "kill-window", "-t", "parity:"+pipeWindowTmux) + assertNormalizedStdoutEqual(t, "list-windows-after-kill", real.Exec("list-windows", "-t", "parity", "-F", "#{window_index}|#{window_name}|#{window_active}"), cmux.Exec("list-windows", "-t", "parity", "-F", "#{window_index}|#{window_name}|#{window_active}")) + assertSessionStateEqual(t, "after-kill-window", real, cmux, "parity") +} + +func fixturePath(t *testing.T, name string) string { + t.Helper() + return filepath.Join(compatPackageDir(), "testdata", name) +} + +func assertSessionStateEqual(t *testing.T, step string, real, cmux tmuxBackend, session string) { + t.Helper() + realState := snapshotSession(t, real, session) + cmuxState := snapshotSession(t, cmux, session) + if !reflect.DeepEqual(realState, cmuxState) { + recordJSONArtifacts(t, step+"-tmux-state.json", realState) + recordJSONArtifacts(t, step+"-cmux-state.json", cmuxState) + t.Fatalf("%s state mismatch", step) + } +} + +func snapshotSession(t *testing.T, backend tmuxBackend, session string) tmuxSessionState { + t.Helper() + + windowsResult := backend.Exec("list-windows", "-t", session, "-F", "#{window_index}|#{window_name}|#{window_active}") + assertResultOK(t, backend.Name()+" list-windows", windowsResult) + windowLines := nonEmptyLines(windowsResult.Stdout) + state := tmuxSessionState{ + Windows: make([]tmuxWindowState, 0, len(windowLines)), + } + for _, line := range windowLines { + parts := strings.SplitN(line, "|", 3) + if len(parts) != 3 { + t.Fatalf("%s list-windows line malformed: %q", backend.Name(), line) + } + window := tmuxWindowState{ + Index: parts[0], + Name: parts[1], + Active: parts[2], + } + panesResult := backend.Exec("list-panes", "-t", session+":"+window.Index, "-F", "#{pane_index}|#{pane_active}") + 
assertResultOK(t, backend.Name()+" list-panes", panesResult) + for _, paneLine := range nonEmptyLines(panesResult.Stdout) { + paneParts := strings.SplitN(paneLine, "|", 2) + if len(paneParts) != 2 { + t.Fatalf("%s list-panes line malformed: %q", backend.Name(), paneLine) + } + capture := backend.Exec("capture-pane", "-p", "-t", session+":"+window.Index+"."+paneParts[0], "-S", "-12") + assertResultOK(t, backend.Name()+" capture-pane", capture) + window.Panes = append(window.Panes, tmuxPaneState{ + Index: paneParts[0], + Active: paneParts[1], + Capture: normalizeCapture(capture.Stdout), + }) + } + state.Windows = append(state.Windows, window) + } + return state +} + +func waitForCaptureContains(t *testing.T, backend tmuxBackend, target, needle string, timeout time.Duration) string { + t.Helper() + deadline := time.Now().Add(timeout) + for time.Now().Before(deadline) { + result := backend.Exec("capture-pane", "-p", "-t", target, "-S", "-20") + if result.OK && strings.Contains(result.Stdout, needle) { + return result.Stdout + } + time.Sleep(50 * time.Millisecond) + } + result := backend.Exec("capture-pane", "-p", "-t", target, "-S", "-20") + recordDiffArtifacts(t, "capture-timeout-"+strings.NewReplacer(":", "_", ".", "_").Replace(target), needle, result.Stdout) + t.Fatalf("%s capture for %s never contained %q; got %q", backend.Name(), target, needle, result.Stdout) + return "" +} + +func waitForFileContains(t *testing.T, path, needle string, timeout time.Duration) { + t.Helper() + deadline := time.Now().Add(timeout) + for time.Now().Before(deadline) { + data, err := os.ReadFile(path) + if err == nil && strings.Contains(string(data), needle) { + return + } + time.Sleep(50 * time.Millisecond) + } + data, _ := os.ReadFile(path) + recordDiffArtifacts(t, filepath.Base(path), needle, string(data)) + t.Fatalf("file %s never contained %q; got %q", path, needle, string(data)) +} + +func mustBothSucceed(t *testing.T, step string, real, cmux tmuxBackend, args ...string) { + 
t.Helper() + assertBothOK(t, step, real.Exec(args...), cmux.Exec(args...)) +} + +func assertBothOK(t *testing.T, step string, realResult, cmuxResult tmuxCommandResult) { + t.Helper() + if !realResult.OK || !cmuxResult.OK { + recordJSONArtifacts(t, step+"-tmux-result.json", realResult) + recordJSONArtifacts(t, step+"-cmux-result.json", cmuxResult) + t.Fatalf("%s failed: tmux=%+v cmux=%+v", step, realResult, cmuxResult) + } +} + +func assertNormalizedStdoutEqual(t *testing.T, step string, realResult, cmuxResult tmuxCommandResult) { + t.Helper() + assertBothOK(t, step, realResult, cmuxResult) + if normalizeCapture(realResult.Stdout) != normalizeCapture(cmuxResult.Stdout) { + recordDiffArtifacts(t, step, realResult.Stdout, cmuxResult.Stdout) + t.Fatalf("%s stdout mismatch: tmux=%q cmux=%q", step, realResult.Stdout, cmuxResult.Stdout) + } +} + +func assertResultOK(t *testing.T, label string, result tmuxCommandResult) { + t.Helper() + if !result.OK { + t.Fatalf("%s failed: %+v", label, result) + } +} + +func assertListContains(t *testing.T, step string, result tmuxCommandResult, needle string) { + t.Helper() + assertResultOK(t, step, result) + if !strings.Contains(result.Stdout, needle) { + recordDiffArtifacts(t, step, needle, result.Stdout) + t.Fatalf("%s missing %q in %q", step, needle, result.Stdout) + } +} + +func mustStdout(t *testing.T, step string, result tmuxCommandResult) string { + t.Helper() + assertResultOK(t, step, result) + return strings.TrimSpace(result.Stdout) +} + +func normalizeText(value string) string { + value = strings.ReplaceAll(value, "\r\n", "\n") + return strings.TrimSpace(value) +} + +func normalizeCapture(value string) string { + lines := strings.Split(strings.ReplaceAll(value, "\r\n", "\n"), "\n") + for len(lines) > 0 && strings.TrimSpace(lines[len(lines)-1]) == "" { + lines = lines[:len(lines)-1] + } + return strings.Join(lines, "\n") +} + +func nonEmptyLines(value string) []string { + var out []string + for _, line := range 
strings.Split(normalizeText(value), "\n") { + if strings.TrimSpace(line) != "" { + out = append(out, line) + } + } + return out +} + +func stringValue(value any) string { + if text, ok := value.(string); ok { + return text + } + return "" +} + +func callUnixJSONRPCUnchecked(client *unixJSONRPCClient, payload map[string]any) (map[string]any, error) { + if client == nil || client.conn == nil || client.reader == nil { + return nil, fmt.Errorf("unix client is closed") + } + encoded, err := json.Marshal(payload) + if err != nil { + return nil, err + } + if err := client.conn.SetDeadline(time.Now().Add(3 * time.Second)); err != nil { + return nil, err + } + if _, err := client.conn.Write(append(encoded, '\n')); err != nil { + return nil, err + } + line, err := client.reader.ReadString('\n') + if err != nil { + return nil, err + } + var response map[string]any + if err := json.Unmarshal([]byte(line), &response); err != nil { + return nil, err + } + return response, nil +} + +func recordJSONArtifacts(t *testing.T, name string, value any) { + t.Helper() + root := strings.TrimSpace(os.Getenv("CMUX_REMOTE_TEST_ARTIFACT_DIR")) + if root == "" { + return + } + dir := filepath.Join(root, sanitizeTestName(t.Name())) + if err := os.MkdirAll(dir, 0o755); err != nil { + return + } + data, err := json.MarshalIndent(value, "", " ") + if err != nil { + return + } + _ = os.WriteFile(filepath.Join(dir, name), data, 0o644) +} + +func recordDiffArtifacts(t *testing.T, name, expected, actual string) { + t.Helper() + recordJSONArtifacts(t, name+"-diff.json", map[string]string{ + "expected": expected, + "actual": actual, + }) +} + +func sanitizeTestName(name string) string { + replacer := strings.NewReplacer("/", "_", " ", "_", ":", "_") + return replacer.Replace(name) +} + +func shortTempDir(t *testing.T, prefix string) string { + t.Helper() + + dir, err := os.MkdirTemp("", prefix) + if err != nil { + t.Fatalf("mkdir temp dir: %v", err) + } + shortDir := filepath.Join("/tmp", 
filepath.Base(dir)) + if renameErr := os.Rename(dir, shortDir); renameErr == nil { + dir = shortDir + } + t.Cleanup(func() { + _ = os.RemoveAll(dir) + }) + return dir +} diff --git a/daemon/remote/compat/unix_socket_compat_test.go b/daemon/remote/compat/unix_socket_compat_test.go index 44d6cea16..cc74fd279 100644 --- a/daemon/remote/compat/unix_socket_compat_test.go +++ b/daemon/remote/compat/unix_socket_compat_test.go @@ -3,7 +3,10 @@ package compat import ( "bufio" "encoding/base64" + "encoding/binary" "encoding/json" + "fmt" + "io" "net" "strconv" "strings" @@ -261,6 +264,149 @@ func TestUnixSocketAcceptsFragmentedJSONRequestLines(t *testing.T) { } } +func TestUnixServeStartsWebSocketListenerWhenConfigured(t *testing.T) { + bin := daemonBinary(t) + const wsSecret = "compat-ws-secret" + _, wsAddr := startUnixDaemonWithWS(t, bin, wsSecret) + + conn, err := net.Dial("tcp", wsAddr) + if err != nil { + t.Fatalf("dial websocket listener %s: %v", wsAddr, err) + } + defer conn.Close() + + reader := bufio.NewReader(conn) + request := fmt.Sprintf( + "GET / HTTP/1.1\r\nHost: %s\r\nUpgrade: websocket\r\nConnection: Upgrade\r\nSec-WebSocket-Key: Y211eC13cy1jb21wYXQ=\r\nSec-WebSocket-Version: 13\r\n\r\n", + wsAddr, + ) + if _, err := io.WriteString(conn, request); err != nil { + t.Fatalf("write websocket upgrade request: %v", err) + } + + statusLine, err := reader.ReadString('\n') + if err != nil { + t.Fatalf("read websocket status line: %v", err) + } + if !strings.Contains(statusLine, "101 Switching Protocols") { + t.Fatalf("websocket upgrade status = %q, want 101 Switching Protocols", strings.TrimSpace(statusLine)) + } + for { + line, err := reader.ReadString('\n') + if err != nil { + t.Fatalf("read websocket response headers: %v", err) + } + if line == "\r\n" { + break + } + } + + writeMaskedWebSocketTextFrame(t, conn, fmt.Sprintf(`{"secret":"%s"}`, wsSecret)) + auth := readWebSocketJSONFrame(t, reader) + if ok, _ := auth["ok"].(bool); !ok { + t.Fatalf("websocket auth 
should succeed: %+v", auth) + } + + writeMaskedWebSocketTextFrame(t, conn, `{"id":1,"method":"hello","params":{}}`) + hello := readWebSocketJSONFrame(t, reader) + if ok, _ := hello["ok"].(bool); !ok { + t.Fatalf("websocket hello should succeed: %+v", hello) + } + result, _ := hello["result"].(map[string]any) + if name, _ := result["name"].(string); name != "cmuxd-remote" { + t.Fatalf("websocket hello name = %q, want %q", name, "cmuxd-remote") + } + + writeMaskedWebSocketTextFrame(t, conn, `{"id":2,"method":"session.open","params":{"session_id":"ws-compat"}}`) + open := readWebSocketJSONFrame(t, reader) + if ok, _ := open["ok"].(bool); !ok { + t.Fatalf("websocket session.open should succeed: %+v", open) + } + + writeMaskedWebSocketTextFrame(t, conn, `{"id":3,"method":"session.close","params":{"session_id":"ws-compat"}}`) + closeResp := readWebSocketJSONFrame(t, reader) + if ok, _ := closeResp["ok"].(bool); !ok { + t.Fatalf("websocket session.close should succeed: %+v", closeResp) + } +} + +func writeMaskedWebSocketTextFrame(t *testing.T, conn net.Conn, text string) { + t.Helper() + + payload := []byte(text) + frame := make([]byte, 0, len(payload)+14) + frame = append(frame, 0x81) + switch { + case len(payload) <= 125: + frame = append(frame, byte(0x80|len(payload))) + case len(payload) <= 0xFFFF: + frame = append(frame, 0x80|126) + extended := make([]byte, 2) + binary.BigEndian.PutUint16(extended, uint16(len(payload))) + frame = append(frame, extended...) + default: + frame = append(frame, 0x80|127) + extended := make([]byte, 8) + binary.BigEndian.PutUint64(extended, uint64(len(payload))) + frame = append(frame, extended...) + } + + mask := []byte{0x10, 0x32, 0x54, 0x76} + frame = append(frame, mask...) 
+ for index, b := range payload { + frame = append(frame, b^mask[index%len(mask)]) + } + + if _, err := conn.Write(frame); err != nil { + t.Fatalf("write websocket frame: %v", err) + } +} + +func readWebSocketJSONFrame(t *testing.T, reader *bufio.Reader) map[string]any { + t.Helper() + + payload := readWebSocketTextFrame(t, reader) + var response map[string]any + if err := json.Unmarshal([]byte(payload), &response); err != nil { + t.Fatalf("decode websocket payload %q: %v", payload, err) + } + return response +} + +func readWebSocketTextFrame(t *testing.T, reader *bufio.Reader) string { + t.Helper() + + header := make([]byte, 2) + if _, err := io.ReadFull(reader, header); err != nil { + t.Fatalf("read websocket header: %v", err) + } + if opcode := header[0] & 0x0F; opcode == 0x08 { + t.Fatal("websocket closed unexpectedly") + } + + payloadLen := int(header[1] & 0x7F) + switch payloadLen { + case 126: + extended := make([]byte, 2) + if _, err := io.ReadFull(reader, extended); err != nil { + t.Fatalf("read websocket extended length: %v", err) + } + payloadLen = int(binary.BigEndian.Uint16(extended)) + case 127: + extended := make([]byte, 8) + if _, err := io.ReadFull(reader, extended); err != nil { + t.Fatalf("read websocket extended length: %v", err) + } + payloadLen = int(binary.BigEndian.Uint64(extended)) + } + + payload := make([]byte, payloadLen) + if _, err := io.ReadFull(reader, payload); err != nil { + t.Fatalf("read websocket payload: %v", err) + } + return string(payload) +} + func TestUnixSocketTerminalWriteRejectsInvalidBase64(t *testing.T) { t.Parallel() diff --git a/daemon/remote/go.mod b/daemon/remote/go.mod index 3cef888dc..3fc894d76 100644 --- a/daemon/remote/go.mod +++ b/daemon/remote/go.mod @@ -2,4 +2,4 @@ module github.com/manaflow-ai/cmux/daemon/remote go 1.22 -require github.com/creack/pty v1.1.24 // indirect +require github.com/creack/pty v1.1.24 diff --git a/daemon/remote/internal/session/manager.go b/daemon/remote/internal/session/manager.go 
index 073093ee7..d41ca708b 100644 --- a/daemon/remote/internal/session/manager.go +++ b/daemon/remote/internal/session/manager.go @@ -10,6 +10,7 @@ import ( var ( ErrSessionNotFound = errors.New("session not found") + ErrSessionExists = errors.New("session already exists") ErrAttachmentNotFound = errors.New("attachment not found") ErrInvalidSize = errors.New("cols and rows must be greater than zero") ) @@ -59,11 +60,15 @@ func NewManager() *Manager { } } -func (m *Manager) Open(cols, rows int) (sessionID, attachmentID string) { +func (m *Manager) Open(sessionID string, cols, rows int) (resolvedSessionID, attachmentID string, err error) { + cols, rows = normalizeSize(cols, rows) m.mu.Lock() defer m.mu.Unlock() - sessionID, state := m.ensureLocked("") + resolvedSessionID, state, err := m.openLocked(sessionID) + if err != nil { + return "", "", err + } attachmentID = m.nextAttachmentIDLocked() state.attachments[attachmentID] = attachmentState{ cols: cols, @@ -72,7 +77,7 @@ func (m *Manager) Open(cols, rows int) (sessionID, attachmentID string) { } recomputeSessionSize(state) - return sessionID, attachmentID + return resolvedSessionID, attachmentID, nil } func (m *Manager) Ensure(sessionID string) SessionStatus { @@ -98,6 +103,7 @@ func (m *Manager) Attach(sessionID, attachmentID string, cols, rows int) error { if cols <= 0 || rows <= 0 { return ErrInvalidSize } + cols, rows = normalizeSize(cols, rows) m.mu.Lock() defer m.mu.Unlock() @@ -120,6 +126,7 @@ func (m *Manager) Resize(sessionID, attachmentID string, cols, rows int) error { if cols <= 0 || rows <= 0 { return ErrInvalidSize } + cols, rows = normalizeSize(cols, rows) m.mu.Lock() defer m.mu.Unlock() @@ -170,10 +177,32 @@ func (m *Manager) Status(sessionID string) (SessionStatus, error) { return snapshotLocked(sessionID, state), nil } +func (m *Manager) List() []SessionStatus { + m.mu.Lock() + defer m.mu.Unlock() + + sessionIDs := make([]string, 0, len(m.sessions)) + for sessionID := range m.sessions { + 
sessionIDs = append(sessionIDs, sessionID) + } + sort.Strings(sessionIDs) + + out := make([]SessionStatus, 0, len(sessionIDs)) + for _, sessionID := range sessionIDs { + out = append(out, snapshotLocked(sessionID, m.sessions[sessionID])) + } + return out +} + func (m *Manager) ensureLocked(sessionID string) (string, *sessionState) { if sessionID == "" { - sessionID = fmt.Sprintf("sess-%d", m.nextSessionID) - m.nextSessionID++ + for { + sessionID = fmt.Sprintf("sess-%d", m.nextSessionID) + m.nextSessionID++ + if _, exists := m.sessions[sessionID]; !exists { + break + } + } } state, ok := m.sessions[sessionID] @@ -187,6 +216,21 @@ func (m *Manager) ensureLocked(sessionID string) (string, *sessionState) { return sessionID, state } +func (m *Manager) openLocked(sessionID string) (string, *sessionState, error) { + if sessionID == "" { + resolvedSessionID, state := m.ensureLocked("") + return resolvedSessionID, state, nil + } + if _, exists := m.sessions[sessionID]; exists { + return "", nil, ErrSessionExists + } + state := &sessionState{ + attachments: map[string]attachmentState{}, + } + m.sessions[sessionID] = state + return sessionID, state, nil +} + func (m *Manager) nextAttachmentIDLocked() string { attachmentID := fmt.Sprintf("att-%d", m.nextAttachmentID) m.nextAttachmentID++ @@ -217,6 +261,16 @@ func recomputeSessionSize(state *sessionState) { state.lastKnownRows = minRows } +func normalizeSize(cols, rows int) (int, int) { + if cols > 0 && cols < 2 { + cols = 2 + } + if rows > 0 && rows < 1 { + rows = 1 + } + return cols, rows +} + func snapshotLocked(sessionID string, state *sessionState) SessionStatus { attachmentIDs := make([]string, 0, len(state.attachments)) for attachmentID := range state.attachments { diff --git a/daemon/remote/internal/session/manager_test.go b/daemon/remote/internal/session/manager_test.go index 53dfcefff..6d0d795b5 100644 --- a/daemon/remote/internal/session/manager_test.go +++ b/daemon/remote/internal/session/manager_test.go @@ -6,7 
+6,10 @@ func TestSessionManagerReattachKeepsExistingSessionState(t *testing.T) { t.Parallel() mgr := NewManager() - sessionID, attachmentID := mgr.Open(120, 40) + sessionID, attachmentID, err := mgr.Open("", 120, 40) + if err != nil { + t.Fatalf("open session: %v", err) + } if err := mgr.Resize(sessionID, attachmentID, 100, 30); err != nil { t.Fatalf("resize existing attachment: %v", err) @@ -29,3 +32,44 @@ func TestSessionManagerReattachKeepsExistingSessionState(t *testing.T) { t.Fatalf("effective rows = %d, want 24", status.EffectiveRows) } } + +func TestSessionManagerOpenRejectsDuplicateExplicitSessionID(t *testing.T) { + t.Parallel() + + mgr := NewManager() + if _, _, err := mgr.Open("demo", 120, 40); err != nil { + t.Fatalf("open first session: %v", err) + } + if _, _, err := mgr.Open("demo", 80, 24); err != ErrSessionExists { + t.Fatalf("duplicate open error = %v, want %v", err, ErrSessionExists) + } + + status, err := mgr.Status("demo") + if err != nil { + t.Fatalf("status after duplicate open: %v", err) + } + if len(status.Attachments) != 1 { + t.Fatalf("attachments = %d, want 1", len(status.Attachments)) + } +} + +func TestSessionManagerGeneratedIDsSkipExistingCustomIDs(t *testing.T) { + t.Parallel() + + mgr := NewManager() + firstSessionID, _, err := mgr.Open("sess-1", 120, 40) + if err != nil { + t.Fatalf("open custom session: %v", err) + } + secondSessionID, _, err := mgr.Open("", 80, 24) + if err != nil { + t.Fatalf("open generated session: %v", err) + } + + if firstSessionID != "sess-1" { + t.Fatalf("first session id = %q, want %q", firstSessionID, "sess-1") + } + if secondSessionID != "sess-2" { + t.Fatalf("generated session id = %q, want %q", secondSessionID, "sess-2") + } +} diff --git a/daemon/remote/internal/terminal/manager.go b/daemon/remote/internal/terminal/manager.go index c4d2d95d0..3a0362771 100644 --- a/daemon/remote/internal/terminal/manager.go +++ b/daemon/remote/internal/terminal/manager.go @@ -1,6 +1,7 @@ package terminal import ( + 
"bytes" "errors" "io" "os" @@ -106,6 +107,14 @@ func (m *Manager) Read(sessionID string, offset uint64, maxBytes int, timeout ti return state.read(offset, maxBytes, timeout) } +func (m *Manager) History(sessionID string) ([]byte, error) { + state, err := m.session(sessionID) + if err != nil { + return nil, err + } + return state.history(), nil +} + func (m *Manager) Resize(sessionID string, cols, rows int) error { state, err := m.session(sessionID) if err != nil { @@ -173,6 +182,10 @@ func (s *sessionState) appendOutput(data []byte) { if len(data) == 0 { return } + data = normalizeLineEndings(data) + if len(data) == 0 { + return + } s.mu.Lock() s.buffer = append(s.buffer, data...) @@ -188,6 +201,12 @@ func (s *sessionState) appendOutput(data []byte) { close(notify) } +func (s *sessionState) history() []byte { + s.mu.Lock() + defer s.mu.Unlock() + return append([]byte(nil), s.buffer...) +} + func (s *sessionState) markClosed() { s.mu.Lock() if s.closed { @@ -265,6 +284,20 @@ func (s *sessionState) read(offset uint64, maxBytes int, timeout time.Duration) } } +func normalizeLineEndings(data []byte) []byte { + if !bytes.Contains(data, []byte("\r\n")) { + return data + } + out := make([]byte, 0, len(data)) + for i := 0; i < len(data); i++ { + if data[i] == '\r' && i+1 < len(data) && data[i+1] == '\n' { + continue + } + out = append(out, data[i]) + } + return out +} + func (s *sessionState) close() error { s.markClosed() diff --git a/daemon/remote/internal/terminal/manager_test.go b/daemon/remote/internal/terminal/manager_test.go index 4c1cde330..deb9d3f11 100644 --- a/daemon/remote/internal/terminal/manager_test.go +++ b/daemon/remote/internal/terminal/manager_test.go @@ -34,7 +34,7 @@ func TestManagerRoundTripsOutputAndInput(t *testing.T) { if err != nil { t.Fatalf("read echoed output: %v", err) } - if string(echo.Data) != "hello\r\n" { - t.Fatalf("echo data = %q, want %q", string(echo.Data), "hello\r\n") + if string(echo.Data) != "hello\n" { + t.Fatalf("echo data = 
%q, want %q", string(echo.Data), "hello\n") } } diff --git a/daemon/remote/rust/.cargo/config.toml b/daemon/remote/rust/.cargo/config.toml new file mode 100644 index 000000000..d4e399263 --- /dev/null +++ b/daemon/remote/rust/.cargo/config.toml @@ -0,0 +1,2 @@ +[env] +MACOSX_DEPLOYMENT_TARGET = "11.0" diff --git a/daemon/remote/rust/.gitignore b/daemon/remote/rust/.gitignore new file mode 100644 index 000000000..a27809047 --- /dev/null +++ b/daemon/remote/rust/.gitignore @@ -0,0 +1,4 @@ +/target/ +/ghostty-shim/.zig-cache/ +/ghostty-shim/ghostty +/vendor/ diff --git a/daemon/remote/rust/Cargo.lock b/daemon/remote/rust/Cargo.lock new file mode 100644 index 000000000..5460b0629 --- /dev/null +++ b/daemon/remote/rust/Cargo.lock @@ -0,0 +1,705 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. +version = 4 + +[[package]] +name = "anyhow" +version = "1.0.102" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f202df86484c868dbad7eaa557ef785d5c66295e41b460ef922eca0723b842c" + +[[package]] +name = "autocfg" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c08606f8c3cbf4ce6ec8e28fb0014a2c086708fe954eaa885384a6165172e7e8" + +[[package]] +name = "base64" +version = "0.22.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6" + +[[package]] +name = "bitflags" +version = "1.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" + +[[package]] +name = "block-buffer" +version = "0.10.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71" +dependencies = [ + "generic-array", +] + +[[package]] +name = "cc" +version = "1.2.59" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "b7a4d3ec6524d28a329fc53654bbadc9bdd7b0431f5d65f1a56ffb28a1ee5283" +dependencies = [ + "find-msvc-tools", + "shlex", +] + +[[package]] +name = "cfg-if" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9330f8b2ff13f34540b44e946ef35111825727b38d33286ef986142615121801" + +[[package]] +name = "cmuxd-remote" +version = "0.1.0" +dependencies = [ + "base64", + "crossbeam-channel", + "hmac", + "libc", + "portable-pty", + "rustls", + "rustls-pemfile", + "serde", + "serde_json", + "sha1", + "sha2", + "signal-hook", +] + +[[package]] +name = "cpufeatures" +version = "0.2.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "59ed5838eebb26a2bb2e58f6d5b5316989ae9d08bab10e0e6d103e656d1b0280" +dependencies = [ + "libc", +] + +[[package]] +name = "crossbeam-channel" +version = "0.5.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "82b8f8f868b36967f9606790d1903570de9ceaf870a7bf9fbbd3016d636a2cb2" +dependencies = [ + "crossbeam-utils", +] + +[[package]] +name = "crossbeam-utils" +version = "0.8.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d0a5c400df2834b80a4c3327b3aad3a4c4cd4de0629063962b03235697506a28" + +[[package]] +name = "crypto-common" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "78c8292055d1c1df0cce5d180393dc8cce0abec0a7102adb6c7b1eef6016d60a" +dependencies = [ + "generic-array", + "typenum", +] + +[[package]] +name = "digest" +version = "0.10.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" +dependencies = [ + "block-buffer", + "crypto-common", + "subtle", +] + +[[package]] +name = "downcast-rs" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"75b325c5dbd37f80359721ad39aca5a29fb04c89279657cffdda8736d0c0b9d2" + +[[package]] +name = "errno" +version = "0.3.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "39cab71617ae0d63f51a36d69f866391735b51691dbda63cf6f96d042b63efeb" +dependencies = [ + "libc", + "windows-sys 0.61.2", +] + +[[package]] +name = "filedescriptor" +version = "0.8.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e40758ed24c9b2eeb76c35fb0aebc66c626084edd827e07e1552279814c6682d" +dependencies = [ + "libc", + "thiserror", + "winapi", +] + +[[package]] +name = "find-msvc-tools" +version = "0.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5baebc0774151f905a1a2cc41989300b1e6fbb29aff0ceffa1064fdd3088d582" + +[[package]] +name = "generic-array" +version = "0.14.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a" +dependencies = [ + "typenum", + "version_check", +] + +[[package]] +name = "getrandom" +version = "0.2.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ff2abc00be7fca6ebc474524697ae276ad847ad0a6b3faa4bcb027e9a4614ad0" +dependencies = [ + "cfg-if", + "libc", + "wasi", +] + +[[package]] +name = "hmac" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c49c37c09c17a53d937dfbb742eb3a961d65a994e6bcdcf37e7399d0cc8ab5e" +dependencies = [ + "digest", +] + +[[package]] +name = "ioctl-rs" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f7970510895cee30b3e9128319f2cefd4bde883a39f38baa279567ba3a7eb97d" +dependencies = [ + "libc", +] + +[[package]] +name = "itoa" +version = "1.0.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f42a60cbdf9a97f5d2305f08a87dc4e09308d1276d28c869c684d7777685682" + +[[package]] +name = "lazy_static" +version = 
"1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" + +[[package]] +name = "libc" +version = "0.2.184" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "48f5d2a454e16a5ea0f4ced81bd44e4cfc7bd3a507b61887c99fd3538b28e4af" + +[[package]] +name = "log" +version = "0.4.29" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5e5032e24019045c762d3c0f28f5b6b8bbf38563a65908389bf7978758920897" + +[[package]] +name = "memchr" +version = "2.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8ca58f447f06ed17d5fc4043ce1b10dd205e060fb3ce5b979b8ed8e59ff3f79" + +[[package]] +name = "memoffset" +version = "0.6.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5aa361d4faea93603064a027415f07bd8e1d5c88c9fbf68bf56a285428fd79ce" +dependencies = [ + "autocfg", +] + +[[package]] +name = "nix" +version = "0.25.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f346ff70e7dbfd675fe90590b92d59ef2de15a8779ae305ebcbfd3f0caf59be4" +dependencies = [ + "autocfg", + "bitflags", + "cfg-if", + "libc", + "memoffset", + "pin-utils", +] + +[[package]] +name = "once_cell" +version = "1.21.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9f7c3e4beb33f85d45ae3e3a1792185706c8e16d043238c593331cc7cd313b50" + +[[package]] +name = "pin-utils" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" + +[[package]] +name = "portable-pty" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "806ee80c2a03dbe1a9fb9534f8d19e4c0546b790cde8fd1fea9d6390644cb0be" +dependencies = [ + "anyhow", + "bitflags", + "downcast-rs", + "filedescriptor", + "lazy_static", + "libc", + "log", + "nix", + 
"serial", + "shared_library", + "shell-words", + "winapi", + "winreg", +] + +[[package]] +name = "proc-macro2" +version = "1.0.106" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8fd00f0bb2e90d81d1044c2b32617f68fcb9fa3bb7640c23e9c748e53fb30934" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "quote" +version = "1.0.45" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41f2619966050689382d2b44f664f4bc593e129785a36d6ee376ddf37259b924" +dependencies = [ + "proc-macro2", +] + +[[package]] +name = "ring" +version = "0.17.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a4689e6c2294d81e88dc6261c768b63bc4fcdb852be6d1352498b114f61383b7" +dependencies = [ + "cc", + "cfg-if", + "getrandom", + "libc", + "untrusted", + "windows-sys 0.52.0", +] + +[[package]] +name = "rustls" +version = "0.23.37" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "758025cb5fccfd3bc2fd74708fd4682be41d99e5dff73c377c0646c6012c73a4" +dependencies = [ + "log", + "once_cell", + "ring", + "rustls-pki-types", + "rustls-webpki", + "subtle", + "zeroize", +] + +[[package]] +name = "rustls-pemfile" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dce314e5fee3f39953d46bb63bb8a46d40c2f8fb7cc5a3b6cab2bde9721d6e50" +dependencies = [ + "rustls-pki-types", +] + +[[package]] +name = "rustls-pki-types" +version = "1.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "be040f8b0a225e40375822a563fa9524378b9d63112f53e19ffff34df5d33fdd" +dependencies = [ + "zeroize", +] + +[[package]] +name = "rustls-webpki" +version = "0.103.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "df33b2b81ac578cabaf06b89b0631153a3f416b0a886e8a7a1707fb51abbd1ef" +dependencies = [ + "ring", + "rustls-pki-types", + "untrusted", +] + +[[package]] +name = "serde" +version = "1.0.228" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a8e94ea7f378bd32cbbd37198a4a91436180c5bb472411e48b5ec2e2124ae9e" +dependencies = [ + "serde_core", + "serde_derive", +] + +[[package]] +name = "serde_core" +version = "1.0.228" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41d385c7d4ca58e59fc732af25c3983b67ac852c1a25000afe1175de458b67ad" +dependencies = [ + "serde_derive", +] + +[[package]] +name = "serde_derive" +version = "1.0.228" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d540f220d3187173da220f885ab66608367b6574e925011a9353e4badda91d79" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "serde_json" +version = "1.0.149" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "83fc039473c5595ace860d8c4fafa220ff474b3fc6bfdb4293327f1a37e94d86" +dependencies = [ + "itoa", + "memchr", + "serde", + "serde_core", + "zmij", +] + +[[package]] +name = "serial" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1237a96570fc377c13baa1b88c7589ab66edced652e43ffb17088f003db3e86" +dependencies = [ + "serial-core", + "serial-unix", + "serial-windows", +] + +[[package]] +name = "serial-core" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f46209b345401737ae2125fe5b19a77acce90cd53e1658cda928e4fe9a64581" +dependencies = [ + "libc", +] + +[[package]] +name = "serial-unix" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f03fbca4c9d866e24a459cbca71283f545a37f8e3e002ad8c70593871453cab7" +dependencies = [ + "ioctl-rs", + "libc", + "serial-core", + "termios", +] + +[[package]] +name = "serial-windows" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "15c6d3b776267a75d31bbdfd5d36c0ca051251caafc285827052bc53bcdc8162" +dependencies = [ + "libc", + 
"serial-core", +] + +[[package]] +name = "sha1" +version = "0.10.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3bf829a2d51ab4a5ddf1352d8470c140cadc8301b2ae1789db023f01cedd6ba" +dependencies = [ + "cfg-if", + "cpufeatures", + "digest", +] + +[[package]] +name = "sha2" +version = "0.10.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a7507d819769d01a365ab707794a4084392c824f54a7a6a7862f8c3d0892b283" +dependencies = [ + "cfg-if", + "cpufeatures", + "digest", +] + +[[package]] +name = "shared_library" +version = "0.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a9e7e0f2bfae24d8a5b5a66c5b257a83c7412304311512a0c054cd5e619da11" +dependencies = [ + "lazy_static", + "libc", +] + +[[package]] +name = "shell-words" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc6fe69c597f9c37bfeeeeeb33da3530379845f10be461a66d16d03eca2ded77" + +[[package]] +name = "shlex" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" + +[[package]] +name = "signal-hook" +version = "0.3.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d881a16cf4426aa584979d30bd82cb33429027e42122b169753d6ef1085ed6e2" +dependencies = [ + "libc", + "signal-hook-registry", +] + +[[package]] +name = "signal-hook-registry" +version = "1.4.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c4db69cba1110affc0e9f7bcd48bbf87b3f4fc7c61fc9155afd4c469eb3d6c1b" +dependencies = [ + "errno", + "libc", +] + +[[package]] +name = "subtle" +version = "2.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292" + +[[package]] +name = "syn" +version = "2.0.117" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "e665b8803e7b1d2a727f4023456bbbbe74da67099c585258af0ad9c5013b9b99" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "termios" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d5d9cf598a6d7ce700a4e6a9199da127e6819a61e64b68609683cc9a01b5683a" +dependencies = [ + "libc", +] + +[[package]] +name = "thiserror" +version = "1.0.69" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6aaf5339b578ea85b50e080feb250a3e8ae8cfcdff9a461c9ec2904bc923f52" +dependencies = [ + "thiserror-impl", +] + +[[package]] +name = "thiserror-impl" +version = "1.0.69" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "typenum" +version = "1.19.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "562d481066bde0658276a35467c4af00bdc6ee726305698a55b86e61d7ad82bb" + +[[package]] +name = "unicode-ident" +version = "1.0.24" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e6e4313cd5fcd3dad5cafa179702e2b244f760991f45397d14d4ebf38247da75" + +[[package]] +name = "untrusted" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" + +[[package]] +name = "version_check" +version = "0.9.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a" + +[[package]] +name = "wasi" +version = "0.11.1+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ccf3ec651a847eb01de73ccad15eb7d99f80485de043efb2f370cd654f4ea44b" + +[[package]] +name = "winapi" +version = 
"0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" +dependencies = [ + "winapi-i686-pc-windows-gnu", + "winapi-x86_64-pc-windows-gnu", +] + +[[package]] +name = "winapi-i686-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" + +[[package]] +name = "winapi-x86_64-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" + +[[package]] +name = "windows-link" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0805222e57f7521d6a62e36fa9163bc891acd422f971defe97d64e70d0a4fe5" + +[[package]] +name = "windows-sys" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" +dependencies = [ + "windows-targets", +] + +[[package]] +name = "windows-sys" +version = "0.61.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ae137229bcbd6cdf0f7b80a31df61766145077ddf49416a728b02cb3921ff3fc" +dependencies = [ + "windows-link", +] + +[[package]] +name = "windows-targets" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973" +dependencies = [ + "windows_aarch64_gnullvm", + "windows_aarch64_msvc", + "windows_i686_gnu", + "windows_i686_gnullvm", + "windows_i686_msvc", + "windows_x86_64_gnu", + "windows_x86_64_gnullvm", + "windows_x86_64_msvc", +] + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" + +[[package]] +name = "windows_i686_gnu" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" + +[[package]] +name = "windows_i686_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" + +[[package]] +name = "windows_i686_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" + +[[package]] +name = "winreg" +version = "0.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "80d0f4e272c85def139476380b12f9ac60926689dd2e01d4923222f40580869d" +dependencies = [ + "winapi", +] + +[[package]] +name = "zeroize" +version = "1.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b97154e67e32c85465826e8bcc1c59429aaaf107c1e4a9e53c8d8ccd5eff88d0" + +[[package]] +name = "zmij" 
+version = "1.0.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8848ee67ecc8aedbaf3e4122217aff892639231befc6a1b58d29fff4c2cabaa" diff --git a/daemon/remote/rust/Cargo.toml b/daemon/remote/rust/Cargo.toml new file mode 100644 index 000000000..b84547324 --- /dev/null +++ b/daemon/remote/rust/Cargo.toml @@ -0,0 +1,22 @@ +[package] +name = "cmuxd-remote" +version = "0.1.0" +edition = "2024" +build = "build.rs" + +[dependencies] +base64 = "0.22" +crossbeam-channel = "0.5" +hmac = "0.12" +libc = "0.2" +portable-pty = "0.8" +rustls = { version = "0.23", default-features = false, features = ["logging", "ring", "std", "tls12"] } +rustls-pemfile = "2" +serde = { version = "1", features = ["derive"] } +serde_json = "1" +sha1 = "0.10" +sha2 = "0.10" +signal-hook = "0.3" + +[profile.release] +lto = "thin" diff --git a/daemon/remote/rust/build.rs b/daemon/remote/rust/build.rs new file mode 100644 index 000000000..2f48dfa08 --- /dev/null +++ b/daemon/remote/rust/build.rs @@ -0,0 +1,133 @@ +use std::env; +use std::fs; +use std::path::{Path, PathBuf}; +use std::process::Command; + +fn main() { + let manifest_dir = + PathBuf::from(env::var("CARGO_MANIFEST_DIR").expect("CARGO_MANIFEST_DIR is set")); + let shim_dir = manifest_dir.join("ghostty-shim"); + let out_dir = PathBuf::from(env::var("OUT_DIR").expect("OUT_DIR is set")); + let install_dir = out_dir.join("ghostty-shim-install"); + let rust_target = env::var("TARGET").expect("TARGET is set"); + let macos_deployment = + env::var("MACOSX_DEPLOYMENT_TARGET").unwrap_or_else(|_| "11.0".to_string()); + + let ghostty_source = env::var_os("GHOSTTY_SOURCE_DIR") + .map(PathBuf::from) + .unwrap_or_else(|| manifest_dir.join("../../../ghostty")); + if !ghostty_source.join("build.zig").exists() { + panic!( + "Ghostty source not found at {}. 
Set GHOSTTY_SOURCE_DIR to the worktree ghostty checkout.", + ghostty_source.display() + ); + } + + let shim_link = shim_dir.join("ghostty"); + ensure_symlink(&ghostty_source, &shim_link) + .expect("failed to link Ghostty source into shim workspace"); + + // The embedded Ghostty VT hits debug-only assertions on real shell output. + // Build the shim in release mode by default so the daemon stays alive. + let optimize = + env::var("CMUX_GHOSTTY_SHIM_OPTIMIZE").unwrap_or_else(|_| "ReleaseFast".to_string()); + let mut command = Command::new("zig"); + command + .current_dir(&shim_dir) + .arg("build") + .arg("--prefix") + .arg(&install_dir) + .arg(format!("-Doptimize={optimize}")); + if let Some(zig_target) = zig_target_for_rust(&rust_target, &macos_deployment) { + command.arg(format!("-Dtarget={zig_target}")); + } + let status = command + .status() + .expect("failed to run zig build for cmux Ghostty shim"); + if !status.success() { + panic!("zig build failed for cmux Ghostty shim"); + } + + println!( + "cargo:rustc-link-search=native={}", + install_dir.join("lib").display() + ); + println!("cargo:rustc-link-lib=dylib=cmux-ghostty-shim"); + println!( + "cargo:rustc-link-arg=-Wl,-rpath,{}", + install_dir.join("lib").display() + ); + let cpp_runtime = match rust_target.as_str() { + target if target.contains("apple-darwin") => "c++", + target if target.contains("linux") => "stdc++", + _ => "c++", + }; + println!("cargo:rerun-if-env-changed=GHOSTTY_SOURCE_DIR"); + println!("cargo:rerun-if-env-changed=CMUX_GHOSTTY_SHIM_OPTIMIZE"); + println!( + "cargo:rerun-if-changed={}", + manifest_dir.join("build.rs").display() + ); + println!("cargo:rustc-link-lib={cpp_runtime}"); + println!( + "cargo:rerun-if-changed={}", + manifest_dir.join("ghostty-shim/build.zig").display() + ); + println!( + "cargo:rerun-if-changed={}", + manifest_dir.join("ghostty-shim/build.zig.zon").display() + ); + println!( + "cargo:rerun-if-changed={}", + manifest_dir.join("ghostty-shim/src/root.zig").display() 
+ ); +} + +#[cfg(unix)] +fn ensure_symlink(target: &Path, link: &Path) -> Result<(), String> { + use std::os::unix::fs as unix_fs; + + let target = target + .canonicalize() + .map_err(|err| format!("canonicalize {}: {err}", target.display()))?; + + if let Ok(existing) = fs::read_link(link) { + let resolved = if existing.is_absolute() { + existing + } else { + link.parent() + .unwrap_or_else(|| Path::new(".")) + .join(existing) + }; + if resolved == target { + return Ok(()); + } + } + + if let Ok(metadata) = fs::symlink_metadata(link) { + if metadata.file_type().is_dir() && !metadata.file_type().is_symlink() { + fs::remove_dir_all(link) + .map_err(|err| format!("remove_dir_all {}: {err}", link.display()))?; + } else { + fs::remove_file(link) + .map_err(|err| format!("remove_file {}: {err}", link.display()))?; + } + } + + unix_fs::symlink(&target, link) + .map_err(|err| format!("symlink {} -> {}: {err}", link.display(), target.display())) +} + +#[cfg(not(unix))] +fn ensure_symlink(_target: &Path, _link: &Path) -> Result<(), String> { + Err("cmux Ghostty shim only supports unix-like builds".to_string()) +} + +fn zig_target_for_rust(rust_target: &str, macos_deployment: &str) -> Option<String> { + let arch = match rust_target { + "aarch64-apple-darwin" => "aarch64", + "x86_64-apple-darwin" => "x86_64", + _ => return None, + }; + Some(format!("{arch}-macos.{macos_deployment}")) +} diff --git a/daemon/remote/rust/ghostty-shim/build.zig b/daemon/remote/rust/ghostty-shim/build.zig new file mode 100644 index 000000000..8a021d66e --- /dev/null +++ b/daemon/remote/rust/ghostty-shim/build.zig @@ -0,0 +1,38 @@ +const std = @import("std"); + +pub fn build(b: *std.Build) void { + const target = b.standardTargetOptions(.{}); + const optimize = b.standardOptimizeOption(.{}); + + const mod = b.createModule(.{ + .root_source_file = b.path("src/root.zig"), + .target = target, + .optimize = optimize, + }); + + if (b.lazyDependency("ghostty", .{ + .target = target, + .optimize = 
optimize, + })) |dep| { + mod.addImport("ghostty-vt", dep.module("ghostty-vt")); + } + + const lib = b.addLibrary(.{ + .name = "cmux-ghostty-shim", + .linkage = .dynamic, + .root_module = mod, + }); + lib.linkLibC(); + lib.linkLibCpp(); + b.installArtifact(lib); + + const unit_tests = b.addTest(.{ + .root_module = mod, + }); + unit_tests.linkLibC(); + unit_tests.linkLibCpp(); + + const run_unit_tests = b.addRunArtifact(unit_tests); + const test_step = b.step("test", "Run cmux Ghostty shim tests"); + test_step.dependOn(&run_unit_tests.step); +} diff --git a/daemon/remote/rust/ghostty-shim/build.zig.zon b/daemon/remote/rust/ghostty-shim/build.zig.zon new file mode 100644 index 000000000..cd1e92458 --- /dev/null +++ b/daemon/remote/rust/ghostty-shim/build.zig.zon @@ -0,0 +1,14 @@ +.{ + .name = .cmux_ghostty_shim, + .version = "0.0.0", + .fingerprint = 0x5ab27c4ebe6da4dd, + .minimum_zig_version = "0.15.1", + .dependencies = .{ + .ghostty = .{ .path = "ghostty" }, + }, + .paths = .{ + "build.zig", + "build.zig.zon", + "src", + }, +} diff --git a/daemon/remote/rust/ghostty-shim/src/root.zig b/daemon/remote/rust/ghostty-shim/src/root.zig new file mode 100644 index 000000000..8d456be6e --- /dev/null +++ b/daemon/remote/rust/ghostty-shim/src/root.zig @@ -0,0 +1,132 @@ +const std = @import("std"); +const ghostty_vt = @import("ghostty-vt"); +const testing = std.testing; + +const Allocator = std.mem.Allocator; + +const Handle = struct { + alloc: Allocator, + terminal: ghostty_vt.Terminal, + stream: ghostty_vt.ReadonlyStream, + + fn init(self: *Handle, alloc: Allocator, cols: u16, rows: u16, max_scrollback: usize) !void { + self.alloc = alloc; + self.terminal = try ghostty_vt.Terminal.init(alloc, .{ + .cols = @max(@as(u16, 2), cols), + .rows = @max(@as(u16, 1), rows), + .max_scrollback = max_scrollback, + }); + + // The readonly stream stores a pointer to the terminal, so it must be + // created from the terminal in its final storage location. 
+ self.stream = self.terminal.vtStream(); + } + + fn deinit(self: *Handle) void { + self.stream.deinit(); + self.terminal.deinit(self.alloc); + } +}; + +pub const CaptureBuffer = extern struct { + ptr: [*c]u8, + len: usize, +}; + +const CapturePayload = struct { + cols: u16, + rows: u16, + cursor_x: u16, + cursor_y: u16, + history: []const u8, + visible: []const u8, +}; + +export fn cmux_ghostty_new(cols: u16, rows: u16, max_scrollback: usize) ?*Handle { + const alloc = std.heap.c_allocator; + const handle = alloc.create(Handle) catch return null; + handle.init(alloc, cols, rows, max_scrollback) catch { + alloc.destroy(handle); + return null; + }; + return handle; +} + +export fn cmux_ghostty_free(handle: ?*Handle) void { + const ptr = handle orelse return; + ptr.deinit(); + std.heap.c_allocator.destroy(ptr); +} + +export fn cmux_ghostty_feed(handle: *Handle, data_ptr: [*]const u8, data_len: usize) bool { + handle.stream.nextSlice(data_ptr[0..data_len]) catch return false; + return true; +} + +export fn cmux_ghostty_resize(handle: *Handle, cols: u16, rows: u16) bool { + handle.terminal.resize( + handle.alloc, + @max(@as(u16, 2), cols), + @max(@as(u16, 1), rows), + ) catch return false; + return true; +} + +export fn cmux_ghostty_capture_json( + handle: *Handle, + include_history: bool, + out: *CaptureBuffer, +) bool { + const alloc = std.heap.c_allocator; + const screen = handle.terminal.screens.active; + + const visible = dumpOrEmpty(screen, alloc, .{ .active = .{} }) catch return false; + defer alloc.free(visible); + + const history = if (include_history) + dumpOrEmpty(screen, alloc, .{ .history = .{} }) catch return false + else + alloc.dupe(u8, "") catch return false; + defer alloc.free(history); + + var builder: std.Io.Writer.Allocating = .init(alloc); + defer builder.deinit(); + + std.json.Stringify.value(CapturePayload{ + .cols = @intCast(handle.terminal.cols), + .rows = @intCast(handle.terminal.rows), + .cursor_x = 
@intCast(handle.terminal.screens.active.cursor.x), + .cursor_y = @intCast(handle.terminal.screens.active.cursor.y), + .history = history, + .visible = visible, + }, .{}, &builder.writer) catch return false; + + const encoded = builder.writer.buffered(); + const owned = alloc.dupe(u8, encoded) catch return false; + out.* = .{ + .ptr = if (owned.len == 0) null else owned.ptr, + .len = owned.len, + }; + return true; +} + +export fn cmux_ghostty_buffer_free(ptr: [*c]u8, len: usize) void { + if (ptr == null or len == 0) return; + std.heap.c_allocator.free(ptr[0..len]); +} + +fn dumpOrEmpty(screen: *const ghostty_vt.Screen, alloc: Allocator, point: ghostty_vt.Point) ![]const u8 { + return screen.dumpStringAllocUnwrapped(alloc, point) catch |err| switch (err) { + error.UnknownPoint => alloc.dupe(u8, ""), + else => err, + }; +} + +test "Handle.init keeps vt stream bound to stored terminal" { + const handle = try testing.allocator.create(Handle); + defer testing.allocator.destroy(handle); + try handle.init(testing.allocator, 80, 24, 1_000); + defer handle.deinit(); + + try testing.expectEqual(@intFromPtr(&handle.terminal), @intFromPtr(handle.stream.handler.terminal)); +} diff --git a/daemon/remote/rust/src/auth.rs b/daemon/remote/rust/src/auth.rs new file mode 100644 index 000000000..e3b67fb62 --- /dev/null +++ b/daemon/remote/rust/src/auth.rs @@ -0,0 +1,127 @@ +use base64::Engine; +use hmac::{Hmac, Mac}; +use serde::{Deserialize, Serialize}; +use sha2::Sha256; + +type HmacSha256 = Hmac<Sha256>; + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct TicketClaims { + #[serde(default)] + pub server_id: String, + #[serde(default)] + pub team_id: String, + #[serde(default)] + pub session_id: String, + #[serde(default)] + pub attachment_id: String, + #[serde(default)] + pub capabilities: Vec<String>, + #[serde(default)] + pub exp: i64, + #[serde(default)] + pub nonce: String, +} + +#[derive(Debug, Clone)] +pub enum TicketError { + Malformed, + InvalidSignature, + 
Expired, + WrongServer, +} + +impl std::fmt::Display for TicketError { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + TicketError::Malformed => write!(f, "malformed ticket"), + TicketError::InvalidSignature => write!(f, "invalid ticket signature"), + TicketError::Expired => write!(f, "ticket expired"), + TicketError::WrongServer => write!(f, "ticket server mismatch"), + } + } +} + +impl std::error::Error for TicketError {} + +pub fn verify_ticket( + token: &str, + secret: &[u8], + expected_server_id: &str, +) -> Result<TicketClaims, TicketError> { + let mut parts = token.split('.'); + let encoded_payload = parts.next().ok_or(TicketError::Malformed)?; + let encoded_signature = parts.next().ok_or(TicketError::Malformed)?; + if parts.next().is_some() { + return Err(TicketError::Malformed); + } + + let signature = base64::engine::general_purpose::URL_SAFE_NO_PAD + .decode(encoded_signature) + .map_err(|_| TicketError::Malformed)?; + let mut mac = HmacSha256::new_from_slice(secret).expect("hmac key"); + mac.update(encoded_payload.as_bytes()); + if mac.verify_slice(&signature).is_err() { + return Err(TicketError::InvalidSignature); + } + + let payload = base64::engine::general_purpose::URL_SAFE_NO_PAD + .decode(encoded_payload) + .map_err(|_| TicketError::Malformed)?; + let claims: TicketClaims = + serde_json::from_slice(&payload).map_err(|_| TicketError::Malformed)?; + if claims.exp <= now_unix() { + return Err(TicketError::Expired); + } + if !expected_server_id.is_empty() && claims.server_id != expected_server_id { + return Err(TicketError::WrongServer); + } + Ok(claims) +} + +pub fn has_session_capability(capabilities: &[String]) -> bool { + capabilities + .iter() + .any(|value| value == "session.attach" || value == "session.open") +} + +#[cfg_attr(not(test), allow(dead_code))] +pub fn sign(payload: &[u8], secret: &[u8]) -> Vec<u8> { + let mut mac = HmacSha256::new_from_slice(secret).expect("hmac key"); + mac.update(payload); + 
mac.finalize().into_bytes().to_vec() +} + +fn now_unix() -> i64 { + std::time::SystemTime::now() + .duration_since(std::time::UNIX_EPOCH) + .map(|value| value.as_secs() as i64) + .unwrap_or_default() +} + +#[cfg(test)] +mod tests { + use super::*; + + fn encode(value: &[u8]) -> String { + base64::engine::general_purpose::URL_SAFE_NO_PAD.encode(value) + } + + #[test] + fn verify_ticket_accepts_valid_signature() { + let payload = encode(br#"{"server_id":"srv","exp":4102444800,"nonce":"n"}"#); + let signature = encode(&sign(payload.as_bytes(), b"secret")); + let token = format!("{payload}.{signature}"); + assert!(verify_ticket(&token, b"secret", "srv").is_ok()); + } + + #[test] + fn verify_ticket_rejects_invalid_signature() { + let payload = encode(br#"{"server_id":"srv","exp":4102444800,"nonce":"n"}"#); + let token = format!("{payload}.{}", encode(b"wrong")); + assert!(matches!( + verify_ticket(&token, b"secret", "srv"), + Err(TicketError::InvalidSignature) + )); + } +} diff --git a/daemon/remote/rust/src/capture.rs b/daemon/remote/rust/src/capture.rs new file mode 100644 index 000000000..a8e0b2e7b --- /dev/null +++ b/daemon/remote/rust/src/capture.rs @@ -0,0 +1,26 @@ +use crate::ghostty::GhosttyCapture; + +#[derive(Debug, Clone, serde::Serialize)] +pub struct TerminalCapture { + pub title: String, + pub pwd: String, + pub cols: u16, + pub rows: u16, + pub cursor_x: u16, + pub cursor_y: u16, + pub history: String, + pub visible: String, +} + +pub fn capture_terminal(raw: GhosttyCapture, title: String, pwd: String) -> TerminalCapture { + TerminalCapture { + title, + pwd, + cols: raw.cols, + rows: raw.rows, + cursor_x: raw.cursor_x, + cursor_y: raw.cursor_y, + history: raw.history, + visible: raw.visible, + } +} diff --git a/daemon/remote/rust/src/client.rs b/daemon/remote/rust/src/client.rs new file mode 100644 index 000000000..f4b7594f5 --- /dev/null +++ b/daemon/remote/rust/src/client.rs @@ -0,0 +1,653 @@ +use std::env; +use std::io::{self, BufRead, BufReader, Read, 
Write}; +use std::os::unix::net::UnixStream; +use std::sync::Arc; +use std::sync::atomic::{AtomicBool, AtomicU32, Ordering}; +use std::thread; +use std::time::{SystemTime, UNIX_EPOCH}; + +use base64::Engine; +use serde_json::{Value, json}; +use signal_hook::consts::signal::SIGWINCH; +use signal_hook::iterator::Signals; + +use crate::rpc::Response; + +pub struct UnixRpcClient { + writer: UnixStream, + reader: BufReader<UnixStream>, + next_id: u64, +} + +impl UnixRpcClient { + pub fn connect(path: &str) -> Result<Self, String> { + let writer = UnixStream::connect(path).map_err(|err| err.to_string())?; + let reader = BufReader::new(writer.try_clone().map_err(|err| err.to_string())?); + Ok(Self { + writer, + reader, + next_id: 1, + }) + } + + pub fn call_value(&mut self, method: String, params: Value) -> Result<Value, String> { + let id = self.next_id; + self.next_id += 1; + let payload = json!({ + "id": id, + "method": method, + "params": params, + }); + let encoded = serde_json::to_vec(&payload).map_err(|err| err.to_string())?; + self.writer + .write_all(&encoded) + .map_err(|err| err.to_string())?; + self.writer + .write_all(b"\n") + .map_err(|err| err.to_string())?; + self.writer.flush().map_err(|err| err.to_string())?; + + let mut line = String::new(); + self.reader + .read_line(&mut line) + .map_err(|err| err.to_string())?; + let response: Response = serde_json::from_str(&line).map_err(|err| err.to_string())?; + if response.ok { + Ok(response.result.unwrap_or_else(|| json!({}))) + } else { + Err(response + .error + .map(|value| value.message) + .unwrap_or_else(|| "request failed".to_string())) + } + } +} + +pub fn run_session_cli(args: &[String]) -> Result<i32, String> { + if args.is_empty() { + print_session_usage(); + return Ok(2); + } + let socket_path = find_socket_arg(args) + .or_else(|| env::var("CMUXD_UNIX_PATH").ok()) + .ok_or_else(|| "missing --socket and CMUXD_UNIX_PATH".to_string())?; + let filtered = strip_socket_arg(args); + match 
filtered.first().map(String::as_str) { + Some("ls") | Some("list") => session_list(&socket_path), + Some("status") => session_status( + &socket_path, + filtered + .get(1) + .ok_or_else(|| "status requires a session id".to_string())?, + ), + Some("history") => session_history( + &socket_path, + filtered + .get(1) + .ok_or_else(|| "history requires a session id".to_string())?, + ), + Some("kill") => session_kill( + &socket_path, + filtered + .get(1) + .ok_or_else(|| "kill requires a session id".to_string())?, + ), + Some("new") => session_new(&socket_path, &filtered[1..]), + Some("attach") => session_attach( + &socket_path, + filtered + .get(1) + .ok_or_else(|| "attach requires a session id".to_string())?, + ), + _ => { + print_session_usage(); + Ok(2) + } + } +} + +pub fn run_amux_cli(args: &[String]) -> Result<i32, String> { + if args.is_empty() { + eprintln!("Usage: cmuxd-remote amux capture|events|wait ..."); + return Ok(2); + } + let socket_path = find_socket_arg(args) + .or_else(|| env::var("CMUXD_UNIX_PATH").ok()) + .ok_or_else(|| "missing --socket and CMUXD_UNIX_PATH".to_string())?; + let filtered = strip_socket_arg(args); + match filtered.first().map(String::as_str) { + Some("capture") => { + let mut client = UnixRpcClient::connect(&socket_path)?; + let session_id = filtered.get(1).cloned().unwrap_or_default(); + let value = client.call_value( + "amux.capture".to_string(), + json!({ + "session_id": session_id, + "history": true, + }), + )?; + println!( + "{}", + serde_json::to_string_pretty(&value).map_err(|err| err.to_string())? 
+ ); + Ok(0) + } + Some("events") => { + let mut cursor = 0_u64; + let mut client = UnixRpcClient::connect(&socket_path)?; + loop { + let value = client.call_value( + "amux.events.read".to_string(), + json!({ + "cursor": cursor, + "timeout_ms": 1000, + }), + )?; + if let Some(next_cursor) = value.get("cursor").and_then(Value::as_u64) { + cursor = next_cursor; + } + if let Some(events) = value.get("events").and_then(Value::as_array) { + for event in events { + println!( + "{}", + serde_json::to_string(event).map_err(|err| err.to_string())? + ); + } + } + } + } + Some("wait") => { + let kind = filtered + .get(1) + .cloned() + .unwrap_or_else(|| "ready".to_string()); + let mut client = UnixRpcClient::connect(&socket_path)?; + let value = client.call_value( + "amux.wait".to_string(), + json!({ + "kind": kind, + "session_id": filtered.get(2).cloned().unwrap_or_default(), + "timeout_ms": 30_000, + }), + )?; + println!( + "{}", + serde_json::to_string_pretty(&value).map_err(|err| err.to_string())? + ); + Ok(0) + } + _ => { + eprintln!("Usage: cmuxd-remote amux capture|events|wait ..."); + Ok(2) + } + } +} + +pub fn run_tmux_cli(args: &[String]) -> Result<i32, String> { + let socket_path = find_socket_arg(args) + .or_else(|| env::var("CMUXD_UNIX_PATH").ok()) + .ok_or_else(|| "missing --socket and CMUXD_UNIX_PATH".to_string())?; + let filtered = strip_socket_arg(args); + if filtered.is_empty() { + eprintln!("Usage: cmuxd-remote tmux <command> [args...]"); + return Ok(2); + } + let mut client = UnixRpcClient::connect(&socket_path)?; + let value = client.call_value("tmux.exec".to_string(), json!({ "argv": filtered }))?; + if let Some(stdout) = value.get("stdout").and_then(Value::as_str) { + print!("{stdout}"); + } else { + println!( + "{}", + serde_json::to_string_pretty(&value).map_err(|err| err.to_string())? 
+ ); + } + Ok(0) +} + +fn session_list(socket_path: &str) -> Result<i32, String> { + let mut client = UnixRpcClient::connect(socket_path)?; + let value = client.call_value("session.list".to_string(), json!({}))?; + let sessions = value + .get("sessions") + .and_then(Value::as_array) + .cloned() + .unwrap_or_default(); + if sessions.is_empty() { + println!("No sessions"); + return Ok(0); + } + for item in sessions { + let session_id = item + .get("session_id") + .and_then(Value::as_str) + .unwrap_or_default(); + let status = client.call_value( + "session.status".to_string(), + json!({ "session_id": session_id }), + )?; + let effective_cols = status + .get("effective_cols") + .and_then(Value::as_u64) + .unwrap_or_default(); + let effective_rows = status + .get("effective_rows") + .and_then(Value::as_u64) + .unwrap_or_default(); + let attachments = status + .get("attachments") + .and_then(Value::as_array) + .cloned() + .unwrap_or_default(); + if attachments.is_empty() { + println!("session {session_id} {effective_cols}x{effective_rows} [detached]"); + continue; + } + println!( + "session {session_id} {effective_cols}x{effective_rows} attachments={}", + attachments.len() + ); + for (index, attachment) in attachments.iter().enumerate() { + let branch = if index + 1 == attachments.len() { + "└──" + } else { + "├──" + }; + let attachment_id = attachment + .get("attachment_id") + .and_then(Value::as_str) + .unwrap_or_default(); + let cols = attachment + .get("cols") + .and_then(Value::as_u64) + .unwrap_or_default(); + let rows = attachment + .get("rows") + .and_then(Value::as_u64) + .unwrap_or_default(); + println!("{branch} {attachment_id} {cols}x{rows}"); + } + } + Ok(0) +} + +fn session_status(socket_path: &str, session_id: &str) -> Result<i32, String> { + let mut client = UnixRpcClient::connect(socket_path)?; + let value = client.call_value( + "session.status".to_string(), + json!({ "session_id": session_id }), + )?; + let effective_cols = value + 
.get("effective_cols") + .and_then(Value::as_u64) + .unwrap_or_default(); + let effective_rows = value + .get("effective_rows") + .and_then(Value::as_u64) + .unwrap_or_default(); + println!("{session_id} {effective_cols}x{effective_rows}"); + Ok(0) +} + +fn session_history(socket_path: &str, session_id: &str) -> Result<i32, String> { + let mut client = UnixRpcClient::connect(socket_path)?; + let value = client.call_value( + "session.history".to_string(), + json!({ "session_id": session_id }), + )?; + print!( + "{}", + value + .get("history") + .and_then(Value::as_str) + .unwrap_or_default() + ); + Ok(0) +} + +fn session_kill(socket_path: &str, session_id: &str) -> Result<i32, String> { + let mut client = UnixRpcClient::connect(socket_path)?; + let _ = client.call_value( + "session.close".to_string(), + json!({ "session_id": session_id }), + )?; + println!("{session_id}"); + Ok(0) +} + +fn session_new(socket_path: &str, args: &[String]) -> Result<i32, String> { + let session_id = args + .first() + .ok_or_else(|| "new requires a session id".to_string())?; + let detached = args.iter().any(|value| value == "--detached"); + let quiet = args.iter().any(|value| value == "--quiet"); + let command = + split_command_tail(args).unwrap_or_else(|| "exec ${SHELL:-/bin/sh} -l".to_string()); + let (cols, rows) = current_size(); + let mut client = UnixRpcClient::connect(socket_path)?; + let value = client.call_value( + "terminal.open".to_string(), + json!({ + "session_id": session_id, + "command": command, + "cols": cols, + "rows": rows, + }), + )?; + let attachment_id = value + .get("attachment_id") + .and_then(Value::as_str) + .ok_or_else(|| "terminal.open did not return attachment_id".to_string())? 
+ .to_string(); + if !quiet { + println!("{session_id}"); + } + let _ = client.call_value( + "session.detach".to_string(), + json!({ + "session_id": session_id, + "attachment_id": attachment_id, + }), + )?; + if detached { + Ok(0) + } else { + session_attach(socket_path, session_id) + } +} + +fn session_attach(socket_path: &str, session_id: &str) -> Result<i32, String> { + let attachment_id = format!("cli-{}-{}", std::process::id(), unix_now()); + let (cols, rows) = current_size(); + let mut control = UnixRpcClient::connect(socket_path)?; + let _ = control.call_value( + "session.attach".to_string(), + json!({ + "session_id": session_id, + "attachment_id": attachment_id, + "cols": cols, + "rows": rows, + }), + )?; + + let stop = Arc::new(AtomicBool::new(false)); + let reported_size = Arc::new(AtomicU32::new(pack_size(cols, rows))); + let result = (|| -> Result<i32, String> { + let raw_mode = RawModeGuard::new()?; + + { + let stop = Arc::clone(&stop); + let reported_size = Arc::clone(&reported_size); + let socket_path = socket_path.to_string(); + let session_id = session_id.to_string(); + let attachment_id = attachment_id.clone(); + thread::spawn(move || { + let mut signals = match Signals::new([SIGWINCH]) { + Ok(value) => value, + Err(_) => return, + }; + for _ in signals.forever() { + if stop.load(Ordering::Relaxed) { + break; + } + let (cols, rows) = current_size(); + if let Ok(mut client) = UnixRpcClient::connect(&socket_path) { + let _ = sync_attachment_size_if_needed( + &mut client, + &reported_size, + &session_id, + &attachment_id, + cols, + rows, + ); + } + } + }); + } + + { + let stop = Arc::clone(&stop); + let socket_path = socket_path.to_string(); + let session_id = session_id.to_string(); + thread::spawn(move || { + let mut client = match UnixRpcClient::connect(&socket_path) { + Ok(value) => value, + Err(_) => { + stop.store(true, Ordering::Relaxed); + return; + } + }; + let mut offset = 0_u64; + let stdout = io::stdout(); + let mut stdout = 
stdout.lock(); + while !stop.load(Ordering::Relaxed) { + match client.call_value( + "terminal.read".to_string(), + json!({ + "session_id": session_id, + "offset": offset, + "max_bytes": 32 * 1024, + "timeout_ms": 200, + }), + ) { + Ok(value) => { + if let Some(next_offset) = value.get("offset").and_then(Value::as_u64) { + offset = next_offset; + } + if let Some(data) = value.get("data").and_then(Value::as_str) { + if let Ok(decoded) = + base64::engine::general_purpose::STANDARD.decode(data) + { + let _ = stdout.write_all(&decoded); + let _ = stdout.flush(); + } + } + if value.get("eof").and_then(Value::as_bool) == Some(true) { + stop.store(true, Ordering::Relaxed); + break; + } + } + Err(err) if err == "terminal read timed out" => continue, + Err(_) => { + stop.store(true, Ordering::Relaxed); + break; + } + } + } + }); + } + + let stdin = io::stdin(); + let mut stdin = stdin.lock(); + let mut buf = [0_u8; 1024]; + loop { + if stop.load(Ordering::Relaxed) { + break; + } + let (cols, rows) = current_size(); + let _ = sync_attachment_size_if_needed( + &mut control, + &reported_size, + session_id, + &attachment_id, + cols, + rows, + ); + if !poll_stdin(200)? 
{ + continue; + } + let len = stdin.read(&mut buf).map_err(|err| err.to_string())?; + if len == 0 { + break; + } + if buf[..len].contains(&0x1c) { + break; + } + let data = base64::engine::general_purpose::STANDARD.encode(&buf[..len]); + let _ = control.call_value( + "terminal.write".to_string(), + json!({ + "session_id": session_id, + "data": data, + }), + )?; + } + drop(raw_mode); + Ok(0) + })(); + + stop.store(true, Ordering::Relaxed); + let _ = control.call_value( + "session.detach".to_string(), + json!({ + "session_id": session_id, + "attachment_id": attachment_id, + }), + ); + result +} + +fn print_session_usage() { + eprintln!("Usage:"); + eprintln!(" cmuxd-remote session ls|list [--socket <path>]"); + eprintln!(" cmuxd-remote session attach|status|history|kill <name> [--socket <path>]"); + eprintln!( + " cmuxd-remote session new <name> [--socket <path>] [--detached] [--quiet] [-- <command>]" + ); + eprintln!("Defaults:"); + eprintln!(" --socket defaults to $CMUXD_UNIX_PATH when set."); +} + +fn find_socket_arg(args: &[String]) -> Option<String> { + let mut idx = 0; + while idx < args.len() { + if args[idx] == "--socket" && idx + 1 < args.len() { + return Some(args[idx + 1].clone()); + } + idx += 1; + } + None +} + +fn strip_socket_arg(args: &[String]) -> Vec<String> { + let mut out = Vec::new(); + let mut idx = 0; + while idx < args.len() { + if args[idx] == "--socket" && idx + 1 < args.len() { + idx += 2; + continue; + } + out.push(args[idx].clone()); + idx += 1; + } + out +} + +fn split_command_tail(args: &[String]) -> Option<String> { + args.iter() + .position(|value| value == "--") + .map(|index| args[index + 1..].join(" ")) + .filter(|value| !value.trim().is_empty()) +} + +fn current_size() -> (u16, u16) { + for fd in [libc::STDOUT_FILENO, libc::STDIN_FILENO, libc::STDERR_FILENO] { + let mut size = libc::winsize { + ws_row: 24, + ws_col: 80, + ws_xpixel: 0, + ws_ypixel: 0, + }; + unsafe { + if libc::ioctl(fd, libc::TIOCGWINSZ, &mut size) == 0 + && 
size.ws_col > 0 + && size.ws_row > 0 + { + return (size.ws_col.max(2), size.ws_row.max(1)); + } + } + } + (80, 24) +} + +fn pack_size(cols: u16, rows: u16) -> u32 { + (u32::from(cols) << 16) | u32::from(rows) +} + +fn sync_attachment_size_if_needed( + client: &mut UnixRpcClient, + reported_size: &AtomicU32, + session_id: &str, + attachment_id: &str, + cols: u16, + rows: u16, +) -> Result<(), String> { + let packed = pack_size(cols, rows); + if reported_size.load(Ordering::Relaxed) == packed { + return Ok(()); + } + let _ = client.call_value( + "session.resize".to_string(), + json!({ + "session_id": session_id, + "attachment_id": attachment_id, + "cols": cols, + "rows": rows, + }), + )?; + reported_size.store(packed, Ordering::Relaxed); + Ok(()) +} + +struct RawModeGuard { + original: libc::termios, +} + +impl RawModeGuard { + fn new() -> Result<Self, String> { + unsafe { + let mut original = std::mem::zeroed::<libc::termios>(); + if libc::tcgetattr(libc::STDIN_FILENO, &mut original) != 0 { + return Err(io::Error::last_os_error().to_string()); + } + let mut raw = original; + libc::cfmakeraw(&mut raw); + if libc::tcsetattr(libc::STDIN_FILENO, libc::TCSANOW, &raw) != 0 { + return Err(io::Error::last_os_error().to_string()); + } + Ok(Self { original }) + } + } +} + +impl Drop for RawModeGuard { + fn drop(&mut self) { + unsafe { + let _ = libc::tcsetattr(libc::STDIN_FILENO, libc::TCSANOW, &self.original); + } + } +} + +fn poll_stdin(timeout_ms: i32) -> Result<bool, String> { + let mut pollfd = libc::pollfd { + fd: libc::STDIN_FILENO, + events: libc::POLLIN, + revents: 0, + }; + let ready = unsafe { libc::poll(&mut pollfd, 1, timeout_ms) }; + if ready < 0 { + let err = io::Error::last_os_error(); + if err.kind() == io::ErrorKind::Interrupted { + return Ok(false); + } + return Err(err.to_string()); + } + Ok(ready > 0 && (pollfd.revents & (libc::POLLIN | libc::POLLHUP | libc::POLLERR)) != 0) +} + +fn unix_now() -> u64 { + SystemTime::now() + .duration_since(UNIX_EPOCH) + 
.map(|value| value.as_secs()) + .unwrap_or_default() +} diff --git a/daemon/remote/rust/src/ghostty.rs b/daemon/remote/rust/src/ghostty.rs new file mode 100644 index 000000000..ffd5695dc --- /dev/null +++ b/daemon/remote/rust/src/ghostty.rs @@ -0,0 +1,103 @@ +use std::ffi::c_void; +use std::ptr::NonNull; + +#[repr(C)] +struct CaptureBuffer { + ptr: *mut u8, + len: usize, +} + +unsafe extern "C" { + fn cmux_ghostty_new(cols: u16, rows: u16, max_scrollback: usize) -> *mut c_void; + fn cmux_ghostty_free(handle: *mut c_void); + fn cmux_ghostty_feed(handle: *mut c_void, data_ptr: *const u8, data_len: usize) -> bool; + fn cmux_ghostty_resize(handle: *mut c_void, cols: u16, rows: u16) -> bool; + fn cmux_ghostty_capture_json( + handle: *mut c_void, + include_history: bool, + out: *mut CaptureBuffer, + ) -> bool; + fn cmux_ghostty_buffer_free(ptr: *mut u8, len: usize); +} + +#[derive(Debug, serde::Deserialize)] +struct GhosttyCaptureJson { + cols: u16, + rows: u16, + cursor_x: u16, + cursor_y: u16, + history: String, + visible: String, +} + +#[derive(Debug, Clone)] +pub struct GhosttyCapture { + pub cols: u16, + pub rows: u16, + pub cursor_x: u16, + pub cursor_y: u16, + pub history: String, + pub visible: String, +} + +pub struct GhosttyTerminal { + raw: NonNull<c_void>, +} + +impl GhosttyTerminal { + pub fn new(cols: u16, rows: u16, max_scrollback: usize) -> Result<Self, String> { + let raw = unsafe { cmux_ghostty_new(cols, rows, max_scrollback) }; + let raw = NonNull::new(raw).ok_or_else(|| "failed to initialize Ghostty VT".to_string())?; + Ok(Self { raw }) + } + + pub fn feed(&mut self, data: &[u8]) -> Result<(), String> { + if unsafe { cmux_ghostty_feed(self.raw.as_ptr(), data.as_ptr(), data.len()) } { + Ok(()) + } else { + Err("failed to feed Ghostty VT".to_string()) + } + } + + pub fn resize(&mut self, cols: u16, rows: u16) -> Result<(), String> { + if unsafe { cmux_ghostty_resize(self.raw.as_ptr(), cols, rows) } { + Ok(()) + } else { + Err("failed to resize Ghostty 
VT".to_string()) + } + } + + pub fn capture(&self, include_history: bool) -> Result<GhosttyCapture, String> { + let mut buffer = CaptureBuffer { + ptr: std::ptr::null_mut(), + len: 0, + }; + if !unsafe { cmux_ghostty_capture_json(self.raw.as_ptr(), include_history, &mut buffer) } { + return Err("failed to capture Ghostty VT state".to_string()); + } + + let bytes = if buffer.len == 0 { + Vec::new() + } else { + unsafe { std::slice::from_raw_parts(buffer.ptr, buffer.len).to_vec() } + }; + unsafe { cmux_ghostty_buffer_free(buffer.ptr, buffer.len) }; + + let decoded: GhosttyCaptureJson = serde_json::from_slice(&bytes) + .map_err(|err| format!("invalid Ghostty capture JSON: {err}"))?; + Ok(GhosttyCapture { + cols: decoded.cols, + rows: decoded.rows, + cursor_x: decoded.cursor_x, + cursor_y: decoded.cursor_y, + history: decoded.history, + visible: decoded.visible, + }) + } +} + +impl Drop for GhosttyTerminal { + fn drop(&mut self) { + unsafe { cmux_ghostty_free(self.raw.as_ptr()) }; + } +} diff --git a/daemon/remote/rust/src/main.rs b/daemon/remote/rust/src/main.rs new file mode 100644 index 000000000..56f29ff8b --- /dev/null +++ b/daemon/remote/rust/src/main.rs @@ -0,0 +1,284 @@ +mod auth; +mod capture; +mod client; +mod ghostty; +mod metadata; +mod pane; +mod proxy; +mod rpc; +mod server; +mod session; +mod tmux; + +use std::env; +use std::io::{self, Write}; +use std::path::Path; +use std::process; + +use client::UnixRpcClient; +use server::Daemon; + +fn main() { + process::exit(run(env::args().collect())); +} + +fn run(args: Vec<String>) -> i32 { + let argv0 = args + .first() + .and_then(|value| Path::new(value).file_name()) + .and_then(|value| value.to_str()) + .unwrap_or("cmuxd-remote"); + + if argv0 == "amux" { + return run_amux_cli(&args[1..]); + } + if argv0 == "tmux" { + return run_tmux_cli(&args[1..]); + } + if argv0 == "cmux" { + return run_cli_relay(&args[1..]); + } + + if args.len() <= 1 { + usage(&mut io::stderr()); + return 2; + } + + match 
args[1].as_str() { + "version" => { + println!("{}", env!("CARGO_PKG_VERSION")); + 0 + } + "serve" => run_serve(&args[2..]), + "session" => run_session_cli(&args[2..]), + "amux" => run_amux_cli(&args[2..]), + "tmux" => run_tmux_cli(&args[2..]), + "cli" => run_cli_relay(&args[2..]), + "list" | "ls" | "attach" | "status" | "history" | "kill" | "new" => { + run_session_cli(&args[1..]) + } + _ => { + usage(&mut io::stderr()); + 2 + } + } +} + +fn run_serve(args: &[String]) -> i32 { + let daemon = Daemon::new(env!("CARGO_PKG_VERSION")); + if args == ["--stdio"] { + match daemon.serve_stdio(io::stdin().lock(), io::stdout().lock()) { + Ok(()) => 0, + Err(err) => { + eprintln!("serve failed: {err}"); + 1 + } + } + } else if !args.is_empty() && args[0] == "--unix" { + match daemon.serve_unix(parse_unix_args(&args[1..])) { + Ok(()) => 0, + Err(err) => { + eprintln!("serve failed: {err}"); + 1 + } + } + } else if !args.is_empty() && args[0] == "--tls" { + match daemon.serve_tls(parse_tls_args(&args[1..])) { + Ok(()) => 0, + Err(err) => { + eprintln!("serve failed: {err}"); + 1 + } + } + } else { + eprintln!("serve requires exactly one of --stdio, --unix, or --tls"); + 2 + } +} + +fn parse_unix_args(args: &[String]) -> server::UnixServeConfig { + let mut cfg = server::UnixServeConfig::default(); + let mut idx = 0; + while idx < args.len() { + if idx + 1 >= args.len() { + break; + } + match args[idx].as_str() { + "--socket" => cfg.socket_path = args[idx + 1].clone(), + "--ws-port" => cfg.ws_port = args[idx + 1].parse().ok(), + "--ws-secret" => cfg.ws_secret = Some(args[idx + 1].clone()), + _ => {} + } + idx += 2; + } + cfg +} + +fn parse_tls_args(args: &[String]) -> server::TlsServeConfig { + let mut cfg = server::TlsServeConfig::default(); + let mut idx = 0; + while idx < args.len() { + if idx + 1 >= args.len() { + break; + } + match args[idx].as_str() { + "--listen" => cfg.listen_addr = args[idx + 1].clone(), + "--server-id" => cfg.server_id = args[idx + 1].clone(), + 
"--ticket-secret" => cfg.ticket_secret = args[idx + 1].clone(), + "--cert-file" => cfg.cert_file = args[idx + 1].clone(), + "--key-file" => cfg.key_file = args[idx + 1].clone(), + _ => {} + } + idx += 2; + } + cfg +} + +fn run_session_cli(args: &[String]) -> i32 { + match client::run_session_cli(args) { + Ok(code) => code, + Err(err) => { + eprintln!("{err}"); + 1 + } + } +} + +fn run_amux_cli(args: &[String]) -> i32 { + match client::run_amux_cli(args) { + Ok(code) => code, + Err(err) => { + eprintln!("{err}"); + 1 + } + } +} + +fn run_tmux_cli(args: &[String]) -> i32 { + match client::run_tmux_cli(args) { + Ok(code) => code, + Err(err) => { + eprintln!("{err}"); + 1 + } + } +} + +fn run_cli_relay(args: &[String]) -> i32 { + let filtered = strip_socket_flag(args); + if filtered.is_empty() + || matches!( + filtered.first().map(String::as_str), + Some("--help") | Some("-h") | Some("help") + ) + { + cli_usage(&mut io::stdout()); + return 0; + } + + let socket = match find_socket_flag(args) + .or_else(|| env::var("CMUX_SOCKET_PATH").ok()) + .or_else(read_socket_addr_file) + { + Some(value) if !value.trim().is_empty() => value, + _ => { + eprintln!( + "cmux: CMUX_SOCKET_PATH not set, ~/.cmux/socket_addr missing, and --socket not provided" + ); + return 1; + } + }; + if filtered.first().map(String::as_str) == Some("rpc") { + if filtered.len() < 2 { + eprintln!("cmux: rpc requires a method"); + return 2; + } + let params = if filtered.len() > 2 { + match serde_json::from_str::<serde_json::Value>(&filtered[2]) { + Ok(value) => value, + Err(err) => { + eprintln!("cmux: invalid JSON params: {err}"); + return 2; + } + } + } else { + serde_json::json!({}) + }; + match UnixRpcClient::connect(&socket) + .and_then(|mut client| client.call_value(filtered[1].clone(), params)) + { + Ok(value) => { + println!( + "{}", + serde_json::to_string_pretty(&value).unwrap_or_else(|_| "{}".to_string()) + ); + 0 + } + Err(err) => { + eprintln!("cmux: {err}"); + 1 + } + } + } else { + 
eprintln!("cmux: Rust relay rewrite is not implemented for this command yet"); + 2 + } +} + +fn find_socket_flag(args: &[String]) -> Option<String> { + let mut idx = 0; + while idx < args.len() { + if args[idx] == "--socket" && idx + 1 < args.len() { + return Some(args[idx + 1].clone()); + } + idx += 1; + } + None +} + +fn strip_socket_flag(args: &[String]) -> Vec<String> { + let mut out = Vec::new(); + let mut idx = 0; + while idx < args.len() { + if args[idx] == "--socket" && idx + 1 < args.len() { + idx += 2; + continue; + } + out.push(args[idx].clone()); + idx += 1; + } + out +} + +fn read_socket_addr_file() -> Option<String> { + let home = env::var("HOME").ok()?; + let path = Path::new(&home).join(".cmux").join("socket_addr"); + let value = std::fs::read_to_string(path).ok()?; + let trimmed = value.trim(); + (!trimmed.is_empty()).then(|| trimmed.to_string()) +} + +fn usage(stderr: &mut dyn Write) { + let _ = writeln!(stderr, "Usage:"); + let _ = writeln!(stderr, " cmuxd-remote version"); + let _ = writeln!(stderr, " cmuxd-remote serve --stdio"); + let _ = writeln!( + stderr, + " cmuxd-remote serve --unix --socket <path> [--ws-port <port> --ws-secret <secret>]" + ); + let _ = writeln!( + stderr, + " cmuxd-remote serve --tls --listen <addr> --server-id <id> --ticket-secret <secret> --cert-file <path> --key-file <path>" + ); + let _ = writeln!(stderr, " cmuxd-remote session <command> [args...]"); + let _ = writeln!(stderr, " cmuxd-remote amux <command> [args...]"); + let _ = writeln!(stderr, " cmuxd-remote tmux <command> [args...]"); + let _ = writeln!(stderr, " cmuxd-remote cli rpc <method> [json-params]"); +} + +fn cli_usage(output: &mut dyn Write) { + let _ = writeln!( + output, + "Usage: cmux [--socket <path>] [--json] <command> [args...]" + ); +} diff --git a/daemon/remote/rust/src/metadata.rs b/daemon/remote/rust/src/metadata.rs new file mode 100644 index 000000000..4144ae797 --- /dev/null +++ b/daemon/remote/rust/src/metadata.rs @@ -0,0 +1,152 @@ +const 
MAX_OSC_BYTES: usize = 8192; // cap on a buffered OSC payload; longer sequences are truncated, not grown unboundedly

/// Parser state for scanning OSC sequences out of a raw byte stream.
#[derive(Debug, Clone, Copy, Default)]
enum State {
    #[default]
    Ground,
    Esc,    // saw ESC; deciding whether an OSC introducer follows
    Osc,    // inside an OSC payload
    OscEsc, // saw ESC inside the payload; ESC \ (ST) terminates it
}

/// Incremental tracker for the terminal title (OSC 0/2) and the
/// shell-reported working directory (OSC 7).
#[derive(Debug, Default)]
pub struct OscTracker {
    state: State,
    buf: Vec<u8>,
    title: String,
    pwd: String,
}

impl OscTracker {
    /// Feed raw terminal output; input may be split at arbitrary byte
    /// boundaries across calls.
    pub fn feed(&mut self, data: &[u8]) {
        for &byte in data {
            match self.state {
                State::Ground => {
                    if byte == 0x1b {
                        self.state = State::Esc;
                    }
                }
                State::Esc => {
                    if byte == b']' {
                        self.buf.clear();
                        self.state = State::Osc;
                    } else {
                        // Not an OSC introducer; drop back to ground.
                        self.state = State::Ground;
                    }
                }
                State::Osc => match byte {
                    // BEL terminates an OSC payload.
                    0x07 => self.finish_osc(),
                    0x1b => self.state = State::OscEsc,
                    _ => self.push(byte),
                },
                State::OscEsc => {
                    if byte == b'\\' {
                        // ESC \ is ST, the string terminator.
                        self.finish_osc();
                    } else {
                        // Lone ESC inside the payload: keep both bytes.
                        self.push(0x1b);
                        self.push(byte);
                        self.state = State::Osc;
                    }
                }
            }
        }
    }

    /// Last title reported via OSC 0/2 (empty until one is seen).
    pub fn title(&self) -> &str {
        &self.title
    }

    /// Last working directory reported via OSC 7 (empty until one is seen).
    pub fn pwd(&self) -> &str {
        &self.pwd
    }

    fn push(&mut self, byte: u8) {
        // Bytes past the cap are dropped; the sequence still terminates.
        if self.buf.len() < MAX_OSC_BYTES {
            self.buf.push(byte);
        }
    }

    fn finish_osc(&mut self) {
        // Take the buffer instead of cloning it: avoids an allocation per
        // OSC sequence and leaves `buf` empty for the next payload, which
        // also makes the explicit clear() redundant.
        let raw = std::mem::take(&mut self.buf);
        if let Ok(payload) = String::from_utf8(raw) {
            self.apply_payload(&payload);
        }
        self.state = State::Ground;
    }

    fn apply_payload(&mut self, payload: &str) {
        // OSC payloads look like "<kind>;<value>"; ignore anything else.
        let Some((kind, value)) = payload.split_once(';') else {
            return;
        };
        match kind {
            // OSC 0 (icon+title) and OSC 2 (title) both update the title.
            "0" | "2" => {
                self.title.clear();
                self.title.push_str(value);
            }
            // OSC 7: working directory, usually a file:// URL.
            "7" => {
                if let Some(decoded) = decode_pwd(value) {
                    self.pwd = decoded;
                }
            }
            _ => {}
        }
    }
}

/// Decode an OSC 7 value into a plain path.
///
/// Accepts a bare absolute path or a URL (e.g. `file://host/path`): the
/// scheme and host are dropped and percent-escapes decoded. Returns `None`
/// for values that are neither; an empty value maps to an empty path.
fn decode_pwd(value: &str) -> Option<String> {
    if value.is_empty() {
        return Some(String::new());
    }
    if value.starts_with('/') {
        return Some(percent_decode(value));
    }

    let (_, rest) = value.split_once("://")?;
    let slash = rest.find('/').unwrap_or(rest.len());
    if slash == rest.len() {
        // URL with a host but no path component.
        return Some("/".to_string());
    }
    Some(percent_decode(&rest[slash..]))
}

/// Decode %XX escapes; malformed escapes pass through unchanged and the
/// result is interpreted as UTF-8 (lossily).
fn percent_decode(input: &str) -> String {
    let bytes
= input.as_bytes(); + let mut output = Vec::with_capacity(input.len()); + let mut idx = 0; + while idx < bytes.len() { + if bytes[idx] == b'%' && idx + 2 < bytes.len() { + let hi = from_hex(bytes[idx + 1]); + let lo = from_hex(bytes[idx + 2]); + if let (Some(hi), Some(lo)) = (hi, lo) { + output.push(hi << 4 | lo); + idx += 3; + continue; + } + } + output.push(bytes[idx]); + idx += 1; + } + String::from_utf8_lossy(&output).into_owned() +} + +fn from_hex(value: u8) -> Option<u8> { + match value { + b'0'..=b'9' => Some(value - b'0'), + b'a'..=b'f' => Some(value - b'a' + 10), + b'A'..=b'F' => Some(value - b'A' + 10), + _ => None, + } +} + +#[cfg(test)] +mod tests { + use super::decode_pwd; + + #[test] + fn decode_pwd_preserves_utf8_paths() { + assert_eq!( + decode_pwd("file:///tmp/caf%C3%A9").as_deref(), + Some("/tmp/café") + ); + } +} diff --git a/daemon/remote/rust/src/pane.rs b/daemon/remote/rust/src/pane.rs new file mode 100644 index 000000000..691e1d582 --- /dev/null +++ b/daemon/remote/rust/src/pane.rs @@ -0,0 +1,521 @@ +use std::env; +use std::io::{Read, Write}; +use std::sync::mpsc; +use std::sync::{Arc, Condvar, Mutex}; +use std::thread; +use std::time::{Duration, Instant}; + +use crossbeam_channel::{Receiver, Sender}; +use portable_pty::{Child, CommandBuilder, MasterPty, PtySize, native_pty_system}; + +use crate::capture::{TerminalCapture, capture_terminal}; +use crate::ghostty::GhosttyTerminal; +use crate::metadata::OscTracker; + +const MAX_RAW_BUFFER_BYTES: usize = 1 << 20; +const IDLE_SETTLE_DURATION: Duration = Duration::from_millis(250); + +#[derive(Debug, Clone, serde::Serialize)] +pub struct PaneCapture { + pub pane_id: String, + pub session_id: String, + pub capture: TerminalCapture, + pub closed: bool, + pub offset: u64, + pub base_offset: u64, +} + +#[derive(Debug)] +pub struct PaneReadResult { + pub data: Vec<u8>, + pub offset: u64, + pub base_offset: u64, + pub truncated: bool, + pub eof: bool, +} + +#[derive(Debug)] +pub struct PaneBufferState { 
+ pub base_offset: u64, + pub next_offset: u64, + pub buffer: Vec<u8>, + pub closed: bool, + pub busy: bool, + pub busy_generation: u64, + pub title: String, + pub pwd: String, + pub last_output_at: Instant, +} + +#[derive(Debug)] +pub struct PaneShared { + pub state: Mutex<PaneBufferState>, + pub cv: Condvar, +} + +#[derive(Debug)] +pub struct PaneHandle { + pub pane_id: String, + pub session_id: String, + pub shared: Arc<PaneShared>, + command_tx: Sender<PaneCommand>, +} + +pub enum PaneRuntimeEvent { + Output { + session_id: String, + pane_id: String, + data: Vec<u8>, + }, + Busy { + session_id: String, + pane_id: String, + }, + Idle { + session_id: String, + pane_id: String, + }, + Exit { + session_id: String, + pane_id: String, + }, +} + +pub type EventCallback = Arc<dyn Fn(PaneRuntimeEvent) + Send + Sync>; + +enum ReaderEvent { + Data(Vec<u8>), + Eof, +} + +enum PaneCommand { + Write(Vec<u8>, mpsc::Sender<Result<usize, String>>), + Resize(u16, u16, mpsc::Sender<Result<(), String>>), + Capture(bool, mpsc::Sender<Result<TerminalCapture, String>>), + Close(mpsc::Sender<()>), +} + +struct PaneRuntime { + child: Box<dyn Child + Send>, + master: Box<dyn MasterPty + Send>, + writer: Box<dyn Write + Send>, + terminal: GhosttyTerminal, + metadata: OscTracker, + reader_rx: Receiver<ReaderEvent>, +} + +impl PaneHandle { + pub fn spawn( + session_id: &str, + pane_id: &str, + command: &str, + cols: u16, + rows: u16, + events: EventCallback, + ) -> Result<Arc<Self>, String> { + let shared = Arc::new(PaneShared { + state: Mutex::new(PaneBufferState { + base_offset: 0, + next_offset: 0, + buffer: Vec::new(), + closed: false, + busy: false, + busy_generation: 0, + title: String::new(), + pwd: String::new(), + last_output_at: Instant::now(), + }), + cv: Condvar::new(), + }); + let (command_tx, command_rx) = crossbeam_channel::unbounded(); + let handle = Arc::new(Self { + pane_id: pane_id.to_string(), + session_id: session_id.to_string(), + shared: Arc::clone(&shared), + 
command_tx, + }); + + let session_id_owned = session_id.to_string(); + let pane_id_owned = pane_id.to_string(); + let command_owned = command.to_string(); + let (startup_tx, startup_rx) = mpsc::channel(); + thread::spawn(move || { + run_pane_actor( + session_id_owned, + pane_id_owned, + command_owned, + cols, + rows, + shared, + command_rx, + events, + startup_tx, + ); + }); + + startup_rx + .recv() + .map_err(|_| "pane runtime startup failed".to_string())??; + Ok(handle) + } + + pub fn write(&self, data: Vec<u8>) -> Result<usize, String> { + let (tx, rx) = mpsc::channel(); + self.command_tx + .send(PaneCommand::Write(data, tx)) + .map_err(|_| "pane runtime is unavailable".to_string())?; + rx.recv().map_err(|_| "pane runtime closed".to_string())? + } + + pub fn resize(&self, cols: u16, rows: u16) -> Result<(), String> { + let (tx, rx) = mpsc::channel(); + self.command_tx + .send(PaneCommand::Resize(cols, rows, tx)) + .map_err(|_| "pane runtime is unavailable".to_string())?; + rx.recv().map_err(|_| "pane runtime closed".to_string())? 
+ } + + pub fn capture(&self, include_history: bool) -> Result<PaneCapture, String> { + let (tx, rx) = mpsc::channel(); + self.command_tx + .send(PaneCommand::Capture(include_history, tx)) + .map_err(|_| "pane runtime is unavailable".to_string())?; + let capture = rx.recv().map_err(|_| "pane runtime closed".to_string())??; + let state = self.shared.state.lock().unwrap(); + Ok(PaneCapture { + pane_id: self.pane_id.clone(), + session_id: self.session_id.clone(), + capture, + closed: state.closed, + offset: state.next_offset, + base_offset: state.base_offset, + }) + } + + pub fn close(&self) { + let (tx, rx) = mpsc::channel(); + if self.command_tx.send(PaneCommand::Close(tx)).is_ok() { + let _ = rx.recv_timeout(Duration::from_secs(1)); + } + } + + pub fn read( + &self, + offset: u64, + max_bytes: usize, + timeout_ms: i32, + ) -> Result<PaneReadResult, String> { + let timeout = if timeout_ms <= 0 { + None + } else { + Some(Duration::from_millis(timeout_ms as u64)) + }; + let deadline = timeout.map(|value| Instant::now() + value); + let mut guard = self.shared.state.lock().unwrap(); + + loop { + let mut effective_offset = offset; + let truncated = effective_offset < guard.base_offset; + if effective_offset < guard.base_offset { + effective_offset = guard.base_offset; + } + if effective_offset < guard.next_offset { + let start = (effective_offset - guard.base_offset) as usize; + let mut end = guard.buffer.len(); + if max_bytes > 0 && end.saturating_sub(start) > max_bytes { + end = start + max_bytes; + } + let data = guard.buffer[start..end].to_vec(); + let offset = effective_offset + (end - start) as u64; + let eof = guard.closed && end == guard.buffer.len(); + return Ok(PaneReadResult { + data, + offset, + base_offset: guard.base_offset, + truncated, + eof, + }); + } + if guard.closed { + return Ok(PaneReadResult { + data: Vec::new(), + offset: guard.next_offset, + base_offset: guard.base_offset, + truncated, + eof: true, + }); + } + + match deadline { + Some(target) => 
{ + let now = Instant::now(); + if now >= target { + return Err("timeout".to_string()); + } + let (next_guard, wait_result) = + self.shared.cv.wait_timeout(guard, target - now).unwrap(); + guard = next_guard; + if wait_result.timed_out() { + return Err("timeout".to_string()); + } + } + None => { + guard = self.shared.cv.wait(guard).unwrap(); + } + } + } + } +} + +fn run_pane_actor( + session_id: String, + pane_id: String, + command: String, + cols: u16, + rows: u16, + shared: Arc<PaneShared>, + command_rx: Receiver<PaneCommand>, + events: EventCallback, + startup_tx: mpsc::Sender<Result<(), String>>, +) { + let mut runtime = match spawn_runtime(&command, cols, rows) { + Ok(runtime) => { + let _ = startup_tx.send(Ok(())); + runtime + } + Err(err) => { + debug_log(&format!( + "pane {pane_id} failed to start command {command:?}: {err}" + )); + { + let mut state = shared.state.lock().unwrap(); + state.closed = true; + } + shared.cv.notify_all(); + let _ = startup_tx.send(Err(err)); + return; + } + }; + let mut runtime_closed = false; + let mut reader_rx = runtime.reader_rx; + while !runtime_closed { + crossbeam_channel::select! 
{ + recv(reader_rx) -> message => { + match message { + Ok(ReaderEvent::Data(data)) => { + let normalized = normalize_line_endings(&data); + let mut emit_busy = false; + let _ = runtime.terminal.feed(&data); + runtime.metadata.feed(&data); + { + let mut state = shared.state.lock().unwrap(); + if !state.busy { + state.busy = true; + state.busy_generation += 1; + emit_busy = true; + } + state.title = runtime.metadata.title().to_string(); + state.pwd = runtime.metadata.pwd().to_string(); + state.buffer.extend_from_slice(&normalized); + state.next_offset += normalized.len() as u64; + state.last_output_at = Instant::now(); + if state.buffer.len() > MAX_RAW_BUFFER_BYTES { + let overflow = state.buffer.len() - MAX_RAW_BUFFER_BYTES; + state.buffer.drain(..overflow); + state.base_offset += overflow as u64; + } + } + shared.cv.notify_all(); + if emit_busy { + events(PaneRuntimeEvent::Busy { + session_id: session_id.clone(), + pane_id: pane_id.clone(), + }); + } + events(PaneRuntimeEvent::Output { + session_id: session_id.clone(), + pane_id: pane_id.clone(), + data: normalized, + }); + } + Ok(ReaderEvent::Eof) | Err(_) => { + debug_log(&format!("pane {pane_id} reader reached EOF")); + reader_rx = crossbeam_channel::never(); + { + let mut state = shared.state.lock().unwrap(); + state.closed = true; + state.busy = false; + } + shared.cv.notify_all(); + events(PaneRuntimeEvent::Exit { + session_id: session_id.clone(), + pane_id: pane_id.clone(), + }); + } + } + } + recv(command_rx) -> message => { + match message { + Ok(PaneCommand::Write(data, reply)) => { + let result = runtime.writer + .write_all(&data) + .and_then(|_| runtime.writer.flush()) + .map(|_| data.len()) + .map_err(|err| err.to_string()); + let _ = reply.send(result); + } + Ok(PaneCommand::Resize(cols, rows, reply)) => { + let result = runtime.master + .resize(PtySize { + rows: rows.max(1), + cols: cols.max(2), + pixel_width: 0, + pixel_height: 0, + }) + .map_err(|err| err.to_string()) + .and_then(|_| 
runtime.terminal.resize(cols.max(2), rows.max(1))) + .map(|_| notify_winch(runtime.master.as_ref(), runtime.child.as_mut())); + let _ = reply.send(result); + } + Ok(PaneCommand::Capture(include_history, reply)) => { + let result = runtime.terminal.capture(include_history).map(|raw| { + capture_terminal( + raw, + runtime.metadata.title().to_string(), + runtime.metadata.pwd().to_string(), + ) + }); + let _ = reply.send(result); + } + Ok(PaneCommand::Close(reply)) => { + debug_log(&format!("pane {pane_id} received close command")); + { + let mut state = shared.state.lock().unwrap(); + state.closed = true; + state.busy = false; + } + shared.cv.notify_all(); + let _ = runtime.child.kill(); + let _ = reply.send(()); + runtime_closed = true; + } + Err(_) => runtime_closed = true, + } + } + default(Duration::from_millis(50)) => { + let emit_idle = { + let mut state = shared.state.lock().unwrap(); + if state.closed || !state.busy || state.last_output_at.elapsed() < IDLE_SETTLE_DURATION { + false + } else { + state.busy = false; + true + } + }; + if emit_idle { + shared.cv.notify_all(); + events(PaneRuntimeEvent::Idle { + session_id: session_id.clone(), + pane_id: pane_id.clone(), + }); + } + } + } + } + + let _ = runtime.child.kill(); + let _ = runtime.child.wait(); +} + +fn notify_winch(master: &dyn MasterPty, child: &mut dyn Child) { + #[cfg(unix)] + if let Some(pgid) = master.process_group_leader() { + unsafe { + let _ = libc::kill(-pgid, libc::SIGWINCH); + } + } else if let Some(pid) = child.process_id() { + unsafe { + let _ = libc::kill(pid as libc::pid_t, libc::SIGWINCH); + } + } +} + +fn spawn_runtime(command: &str, cols: u16, rows: u16) -> Result<PaneRuntime, String> { + let pty_system = native_pty_system(); + let pair = pty_system + .openpty(PtySize { + rows, + cols, + pixel_width: 0, + pixel_height: 0, + }) + .map_err(|err| err.to_string())?; + + let mut cmd = CommandBuilder::new("/bin/sh"); + cmd.arg("-lc"); + cmd.arg(command); + let child = pair + .slave + 
.spawn_command(cmd) + .map_err(|err| err.to_string())?; + drop(pair.slave); + + let master = pair.master; + let reader = master.try_clone_reader().map_err(|err| err.to_string())?; + let writer = master.take_writer().map_err(|err| err.to_string())?; + let terminal = GhosttyTerminal::new(cols, rows, 100_000)?; + let metadata = OscTracker::default(); + + let (reader_tx, reader_rx) = crossbeam_channel::unbounded(); + thread::spawn(move || reader_loop(reader, reader_tx)); + + Ok(PaneRuntime { + child, + master, + writer, + terminal, + metadata, + reader_rx, + }) +} + +fn reader_loop(mut reader: Box<dyn Read + Send>, tx: Sender<ReaderEvent>) { + let mut buf = vec![0_u8; 32 * 1024]; + loop { + match reader.read(&mut buf) { + Ok(0) => { + let _ = tx.send(ReaderEvent::Eof); + return; + } + Ok(len) => { + let _ = tx.send(ReaderEvent::Data(buf[..len].to_vec())); + } + Err(_) => { + let _ = tx.send(ReaderEvent::Eof); + return; + } + } + } +} + +fn debug_log(message: &str) { + if env::var_os("CMUX_REMOTE_DEBUG_LOG").is_some() { + eprintln!("cmuxd-remote debug: {message}"); + } +} + +fn normalize_line_endings(data: &[u8]) -> Vec<u8> { + if !data.windows(2).any(|window| window == b"\r\n") { + return data.to_vec(); + } + let mut out = Vec::with_capacity(data.len()); + let mut idx = 0; + while idx < data.len() { + if data[idx] == b'\r' && idx + 1 < data.len() && data[idx + 1] == b'\n' { + idx += 1; + } + out.push(data[idx]); + idx += 1; + } + out +} diff --git a/daemon/remote/rust/src/proxy.rs b/daemon/remote/rust/src/proxy.rs new file mode 100644 index 000000000..80cfd4be1 --- /dev/null +++ b/daemon/remote/rust/src/proxy.rs @@ -0,0 +1,129 @@ +use std::collections::BTreeMap; +use std::io::{Read, Write}; +use std::net::{TcpStream, ToSocketAddrs}; +use std::sync::Mutex; +use std::time::Duration; + +#[derive(Debug)] +pub enum ProxyError { + NotFound, + Io(std::io::Error), +} + +impl std::fmt::Display for ProxyError { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result 
{ + match self { + ProxyError::NotFound => write!(f, "stream not found"), + ProxyError::Io(err) => write!(f, "{err}"), + } + } +} + +impl std::error::Error for ProxyError {} + +pub struct ProxyReadResult { + pub data: Vec<u8>, + pub eof: bool, +} + +pub struct ProxyManager { + next_id: Mutex<u64>, + streams: Mutex<BTreeMap<String, TcpStream>>, +} + +impl ProxyManager { + pub fn new() -> Self { + Self { + next_id: Mutex::new(1), + streams: Mutex::new(BTreeMap::new()), + } + } + + pub fn open(&self, host: &str, port: u16, timeout_ms: u64) -> Result<String, ProxyError> { + let addr = (host, port) + .to_socket_addrs() + .map_err(ProxyError::Io)? + .next() + .ok_or_else(|| { + ProxyError::Io(std::io::Error::new( + std::io::ErrorKind::NotFound, + "address not found", + )) + })?; + let stream = TcpStream::connect_timeout(&addr, Duration::from_millis(timeout_ms)) + .map_err(ProxyError::Io)?; + let stream_id = { + let mut next = self.next_id.lock().unwrap(); + let value = format!("stream-{next}"); + *next += 1; + value + }; + self.streams + .lock() + .unwrap() + .insert(stream_id.clone(), stream); + Ok(stream_id) + } + + pub fn close(&self, stream_id: &str) -> Result<(), ProxyError> { + self.streams + .lock() + .unwrap() + .remove(stream_id) + .map(|_| ()) + .ok_or(ProxyError::NotFound) + } + + pub fn write(&self, stream_id: &str, data: &[u8]) -> Result<usize, ProxyError> { + let mut stream = self.clone_stream(stream_id)?; + stream.write_all(data).map_err(ProxyError::Io)?; + Ok(data.len()) + } + + pub fn read( + &self, + stream_id: &str, + max_bytes: usize, + timeout_ms: i32, + ) -> Result<ProxyReadResult, ProxyError> { + let mut stream = self.clone_stream(stream_id)?; + if timeout_ms >= 0 { + stream + .set_read_timeout(Some(Duration::from_millis(timeout_ms as u64))) + .map_err(ProxyError::Io)?; + } else { + stream.set_read_timeout(None).map_err(ProxyError::Io)?; + } + + let mut buf = vec![0_u8; max_bytes]; + match stream.read(&mut buf) { + Ok(0) => Ok(ProxyReadResult { + 
data: Vec::new(), + eof: true, + }), + Ok(len) => { + buf.truncate(len); + Ok(ProxyReadResult { + data: buf, + eof: false, + }) + } + Err(err) + if err.kind() == std::io::ErrorKind::WouldBlock + || err.kind() == std::io::ErrorKind::TimedOut => + { + Ok(ProxyReadResult { + data: Vec::new(), + eof: false, + }) + } + Err(err) => Err(ProxyError::Io(err)), + } + } + + fn clone_stream(&self, stream_id: &str) -> Result<TcpStream, ProxyError> { + let streams = self.streams.lock().unwrap(); + let stream = streams.get(stream_id).ok_or(ProxyError::NotFound)?; + stream.try_clone().map_err(ProxyError::Io) + } +} diff --git a/daemon/remote/rust/src/rpc.rs b/daemon/remote/rust/src/rpc.rs new file mode 100644 index 000000000..3b1b34e47 --- /dev/null +++ b/daemon/remote/rust/src/rpc.rs @@ -0,0 +1,119 @@ +use std::io::{self, BufRead, Write}; + +use serde::{Deserialize, Serialize}; +use serde_json::Value; + +pub const MAX_FRAME_BYTES: usize = 4 * 1024 * 1024; + +#[derive(Debug, Clone, Deserialize)] +pub struct Request { + #[serde(default)] + pub id: Option<Value>, + #[serde(default)] + pub method: String, + #[serde(default = "empty_object")] + pub params: Value, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct ErrorPayload { + pub code: String, + pub message: String, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct Response { + #[serde(skip_serializing_if = "Option::is_none")] + pub id: Option<Value>, + pub ok: bool, + #[serde(skip_serializing_if = "Option::is_none")] + pub result: Option<Value>, + #[serde(skip_serializing_if = "Option::is_none")] + pub error: Option<ErrorPayload>, +} + +pub enum FrameRead { + Eof, + Frame(Vec<u8>), + Oversized, +} + +pub fn read_frame<R: BufRead>(reader: &mut R) -> io::Result<FrameRead> { + let mut frame = Vec::with_capacity(1024); + loop { + let available = reader.fill_buf()?; + if available.is_empty() { + if frame.is_empty() { + return Ok(FrameRead::Eof); + } + return Ok(FrameRead::Frame(frame)); + } + + if let 
Some(newline) = available.iter().position(|byte| *byte == b'\n') { + let take = newline + 1; + if frame.len() + take > MAX_FRAME_BYTES { + reader.consume(take); + return Ok(FrameRead::Oversized); + } + frame.extend_from_slice(&available[..take]); + reader.consume(take); + return Ok(FrameRead::Frame(frame)); + } + + if frame.len() + available.len() > MAX_FRAME_BYTES { + let len = available.len(); + reader.consume(len); + discard_until_newline(reader)?; + return Ok(FrameRead::Oversized); + } + frame.extend_from_slice(available); + let len = available.len(); + reader.consume(len); + } +} + +pub fn write_response<W: Write>(writer: &mut W, response: &Response) -> io::Result<()> { + serde_json::to_writer(&mut *writer, response)?; + writer.write_all(b"\n")?; + writer.flush() +} + +pub fn ok(id: Option<Value>, result: Value) -> Response { + Response { + id, + ok: true, + result: Some(result), + error: None, + } +} + +pub fn error(id: Option<Value>, code: &str, message: impl Into<String>) -> Response { + Response { + id, + ok: false, + result: None, + error: Some(ErrorPayload { + code: code.to_string(), + message: message.into(), + }), + } +} + +fn empty_object() -> Value { + Value::Object(Default::default()) +} + +fn discard_until_newline<R: BufRead>(reader: &mut R) -> io::Result<()> { + loop { + let available = reader.fill_buf()?; + if available.is_empty() { + return Ok(()); + } + if let Some(newline) = available.iter().position(|byte| *byte == b'\n') { + reader.consume(newline + 1); + return Ok(()); + } + let len = available.len(); + reader.consume(len); + } +} diff --git a/daemon/remote/rust/src/server.rs b/daemon/remote/rust/src/server.rs new file mode 100644 index 000000000..66dd85aba --- /dev/null +++ b/daemon/remote/rust/src/server.rs @@ -0,0 +1,4354 @@ +use std::collections::{BTreeMap, BTreeSet, VecDeque}; +use std::env; +use std::fs; +use std::io::{BufRead, BufReader, Read, Write}; +use std::net::TcpListener; +use std::os::unix::net::UnixListener; +use 
std::path::Path; +use std::process::{Command, Stdio}; +use std::sync::{Arc, Condvar, Mutex}; +use std::thread; +use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH}; + +use base64::Engine; +use rustls::pki_types::{CertificateDer, PrivateKeyDer}; +use serde_json::{Value, json}; +use sha1::{Digest, Sha1}; + +use crate::auth::{TicketClaims, has_session_capability, verify_ticket}; +use crate::pane::{EventCallback, PaneHandle, PaneRuntimeEvent}; +use crate::proxy::{ProxyError, ProxyManager}; +use crate::rpc::{ + FrameRead, Request, Response, error as rpc_error, ok as rpc_ok, read_frame, write_response, +}; +use crate::session::{PaneSlot, Session, SessionError, SessionListEntry, SessionSnapshot, Window}; + +#[derive(Default)] +pub struct UnixServeConfig { + pub socket_path: String, + pub ws_port: Option<u16>, + pub ws_secret: Option<String>, +} + +#[derive(Default)] +pub struct TlsServeConfig { + pub listen_addr: String, + pub server_id: String, + pub ticket_secret: String, + pub cert_file: String, + pub key_file: String, +} + +#[derive(Clone)] +pub struct Daemon { + inner: Arc<DaemonInner>, +} + +struct DaemonInner { + version: String, + state: Mutex<CoreState>, + state_cv: Condvar, + proxies: ProxyManager, +} + +struct CoreState { + next_session_id: u64, + next_attachment_id: u64, + next_window_id: u64, + next_pane_id: u64, + next_event_id: u64, + sessions: BTreeMap<String, Arc<Session>>, + buffers: BTreeMap<String, String>, + pane_pipes: BTreeMap<String, Arc<Mutex<std::process::ChildStdin>>>, + wait_signals: BTreeMap<String, u64>, + used_nonces: BTreeMap<String, i64>, + event_base_cursor: u64, + events: VecDeque<Value>, +} + +impl Daemon { + pub fn new(version: &str) -> Self { + Self { + inner: Arc::new(DaemonInner { + version: version.to_string(), + state: Mutex::new(CoreState { + next_session_id: 1, + next_attachment_id: 1, + next_window_id: 1, + next_pane_id: 1, + next_event_id: 1, + sessions: BTreeMap::new(), + buffers: BTreeMap::new(), + pane_pipes: 
BTreeMap::new(), + wait_signals: BTreeMap::new(), + used_nonces: BTreeMap::new(), + event_base_cursor: 0, + events: VecDeque::new(), + }), + state_cv: Condvar::new(), + proxies: ProxyManager::new(), + }), + } + } + + pub fn serve_stdio<R: Read, W: Write>(&self, input: R, mut output: W) -> Result<(), String> { + let mut reader = BufReader::new(input); + loop { + let response = match read_frame(&mut reader) { + Ok(FrameRead::Eof) => return Ok(()), + Ok(FrameRead::Oversized) => rpc_error( + None, + "invalid_request", + "request frame exceeds maximum size", + ), + Ok(FrameRead::Frame(frame)) => self.parse_and_dispatch(&frame, None), + Err(err) => return Err(err.to_string()), + }; + write_response(&mut output, &response).map_err(|err| err.to_string())?; + } + } + + pub fn serve_unix(&self, cfg: UnixServeConfig) -> Result<(), String> { + if cfg.socket_path.trim().is_empty() { + return Err("missing daemon socket path".to_string()); + } + if let (Some(ws_port), Some(ws_secret)) = (cfg.ws_port, cfg.ws_secret.as_ref()) { + if !ws_secret.is_empty() { + let daemon = self.clone(); + let ws_secret = ws_secret.clone(); + thread::spawn(move || { + let _ = daemon.serve_websocket(ws_port, &ws_secret); + }); + } + } + if let Some(parent) = Path::new(&cfg.socket_path).parent() { + fs::create_dir_all(parent).map_err(|err| err.to_string())?; + } + if Path::new(&cfg.socket_path).exists() { + let _ = fs::remove_file(&cfg.socket_path); + } + + let listener = UnixListener::bind(&cfg.socket_path).map_err(|err| err.to_string())?; + for stream in listener.incoming() { + match stream { + Ok(stream) => { + let daemon = self.clone(); + thread::spawn(move || { + if let Err(err) = daemon.serve_stream(stream, None) { + debug_log(&format!("unix stream closed with error: {err}")); + } + }); + } + Err(err) => return Err(err.to_string()), + } + } + Ok(()) + } + + fn serve_websocket(&self, port: u16, secret: &str) -> Result<(), String> { + let listener = TcpListener::bind(("0.0.0.0", port)).map_err(|err| 
err.to_string())?; + for stream in listener.incoming() { + match stream { + Ok(stream) => { + let daemon = self.clone(); + let secret = secret.to_string(); + thread::spawn(move || { + if let Err(err) = daemon.serve_websocket_stream(stream, &secret) { + debug_log(&format!("websocket stream closed with error: {err}")); + } + }); + } + Err(err) => return Err(err.to_string()), + } + } + Ok(()) + } + + pub fn serve_tls(&self, cfg: TlsServeConfig) -> Result<(), String> { + if cfg.listen_addr.is_empty() + || cfg.server_id.is_empty() + || cfg.ticket_secret.is_empty() + || cfg.cert_file.is_empty() + || cfg.key_file.is_empty() + { + return Err( + "tls listener requires listen address, cert, key, server id, and ticket secret" + .to_string(), + ); + } + + let cert_chain = load_certs(&cfg.cert_file)?; + let private_key = load_key(&cfg.key_file)?; + let config = rustls::ServerConfig::builder() + .with_no_client_auth() + .with_single_cert(cert_chain, private_key) + .map_err(|err| err.to_string())?; + let config = Arc::new(config); + let listener = TcpListener::bind(&cfg.listen_addr).map_err(|err| err.to_string())?; + + for stream in listener.incoming() { + match stream { + Ok(stream) => { + let daemon = self.clone(); + let config = Arc::clone(&config); + let server_id = cfg.server_id.clone(); + let ticket_secret = cfg.ticket_secret.clone(); + thread::spawn(move || { + let connection = + rustls::ServerConnection::new(config).map_err(|err| err.to_string()); + if let Ok(connection) = connection { + let stream = rustls::StreamOwned::new(connection, stream); + if let Err(err) = daemon.serve_tls_stream( + stream, + &server_id, + ticket_secret.as_bytes(), + ) { + debug_log(&format!("tls stream closed with error: {err}")); + } + } else if let Err(err) = connection { + debug_log(&format!("failed to create tls server connection: {err}")); + } + }); + } + Err(err) => return Err(err.to_string()), + } + } + Ok(()) + } + + #[allow(dead_code)] + pub fn dispatch_json(&self, method: &str, params: 
Value) -> Result<Value, String> { + let request = Request { + id: Some(json!(1)), + method: method.to_string(), + params, + }; + let response = self.handle_request(&request); + if response.ok { + Ok(response.result.unwrap_or_else(|| json!({}))) + } else { + Err(response + .error + .map(|value| value.message) + .unwrap_or_else(|| "request failed".to_string())) + } + } + + pub fn signal_wait(&self, name: &str) -> u64 { + let mut state = self.inner.state.lock().unwrap(); + let next = state.wait_signals.get(name).copied().unwrap_or(0) + 1; + state.wait_signals.insert(name.to_string(), next); + self.emit_event_locked( + &mut state, + "wait.signal", + json!({ "name": name, "generation": next }), + ); + self.inner.state_cv.notify_all(); + next + } + + pub fn sessions(&self) -> Vec<Arc<Session>> { + self.inner + .state + .lock() + .unwrap() + .sessions + .values() + .cloned() + .collect() + } + + pub fn find_session(&self, session_id: &str) -> Option<Arc<Session>> { + self.inner + .state + .lock() + .unwrap() + .sessions + .get(session_id) + .cloned() + } + + pub fn find_pane_by_id( + &self, + pane_id: &str, + ) -> Option<(Arc<Session>, String, Arc<PaneHandle>)> { + for session in self.sessions() { + let inner = session.inner.lock().unwrap(); + for window in &inner.windows { + for pane in &window.panes { + if pane.pane_id == pane_id { + return Some(( + Arc::clone(&session), + window.id.clone(), + Arc::clone(&pane.handle), + )); + } + } + } + } + None + } + + fn serve_stream<S: Read + Write>( + &self, + stream: S, + authorizer: Option<DirectAuthorizer>, + ) -> Result<(), String> { + self.serve_reader(BufReader::new(stream), authorizer) + } + + fn serve_reader<S: Read + Write>( + &self, + mut reader: BufReader<S>, + authorizer: Option<DirectAuthorizer>, + ) -> Result<(), String> { + let mut authorizer = authorizer; + let mut last_method: Option<String> = None; + loop { + let response = match read_frame(&mut reader) { + Ok(FrameRead::Eof) => return Ok(()), + 
Ok(FrameRead::Oversized) => rpc_error( + None, + "invalid_request", + "request frame exceeds maximum size", + ), + Ok(FrameRead::Frame(frame)) => { + last_method = request_method_for_frame(&frame); + self.parse_and_dispatch(&frame, authorizer.as_mut()) + } + Err(err) => { + let context = last_method.as_deref().unwrap_or("unknown"); + return Err(format!("read_frame after {context}: {err}")); + } + }; + if let Err(err) = write_response(reader.get_mut(), &response) { + let context = last_method.as_deref().unwrap_or("unknown"); + return Err(format!("write_response for {context}: {err}")); + } + } + } + + fn serve_tls_stream<S: Read + Write>( + &self, + stream: S, + expected_server_id: &str, + ticket_secret: &[u8], + ) -> Result<(), String> { + let mut reader = BufReader::new(stream); + let frame = match read_frame(&mut reader) { + Ok(FrameRead::Frame(frame)) => frame, + Ok(FrameRead::Oversized) => { + write_response( + reader.get_mut(), + &rpc_error( + None, + "invalid_request", + "handshake frame exceeds maximum size", + ), + ) + .map_err(|err| err.to_string())?; + return Ok(()); + } + Ok(FrameRead::Eof) => return Ok(()), + Err(err) => return Err(err.to_string()), + }; + let value: Value = match serde_json::from_slice(trim_crlf(&frame)) { + Ok(value) => value, + Err(_) => { + write_response( + reader.get_mut(), + &rpc_error(None, "invalid_request", "invalid JSON handshake"), + ) + .map_err(|err| err.to_string())?; + return Ok(()); + } + }; + let Some(ticket) = value.get("ticket").and_then(Value::as_str) else { + write_response( + reader.get_mut(), + &rpc_error(None, "invalid_request", "ticket is required"), + ) + .map_err(|err| err.to_string())?; + return Ok(()); + }; + let claims = match verify_ticket(ticket, ticket_secret, expected_server_id) { + Ok(claims) => claims, + Err(err) => { + write_response( + reader.get_mut(), + &rpc_error(None, "unauthorized", err.to_string()), + ) + .map_err(|write_err| write_err.to_string())?; + return Ok(()); + } + }; + if 
!has_session_capability(&claims.capabilities) { + write_response( + reader.get_mut(), + &rpc_error(None, "unauthorized", "ticket missing session capability"), + ) + .map_err(|err| err.to_string())?; + return Ok(()); + } + if claims.nonce.trim().is_empty() { + write_response( + reader.get_mut(), + &rpc_error(None, "unauthorized", "ticket nonce is required"), + ) + .map_err(|err| err.to_string())?; + return Ok(()); + } + if let Err(message) = self.consume_nonce(&claims.nonce, claims.exp) { + write_response(reader.get_mut(), &rpc_error(None, "unauthorized", message)) + .map_err(|err| err.to_string())?; + return Ok(()); + } + write_response( + reader.get_mut(), + &rpc_ok(None, json!({ "authenticated": true })), + ) + .map_err(|err| err.to_string())?; + self.serve_reader(reader, Some(DirectAuthorizer::new(claims))) + } + + fn parse_and_dispatch( + &self, + frame: &[u8], + authorizer: Option<&mut DirectAuthorizer>, + ) -> Response { + let request = match serde_json::from_slice::<Request>(trim_crlf(frame)) { + Ok(value) => value, + Err(_) => return rpc_error(None, "invalid_request", "invalid JSON request"), + }; + if let Some(authorizer) = authorizer { + authorizer.handle(self, &request) + } else { + self.handle_request(&request) + } + } + + fn handle_request(&self, request: &Request) -> Response { + if request.method.is_empty() { + return rpc_error(request.id.clone(), "invalid_request", "method is required"); + } + match request.method.as_str() { + "hello" => rpc_ok( + request.id.clone(), + json!({ + "name": "cmuxd-remote", + "version": self.inner.version, + "capabilities": [ + "session.basic", + "session.resize.min", + "terminal.stream", + "proxy.http_connect", + "proxy.socks5", + "proxy.stream", + "amux.capture", + "amux.wait", + "amux.events.read", + "tmux.exec", + ], + }), + ), + "ping" => rpc_ok(request.id.clone(), json!({ "pong": true })), + "proxy.open" => self.handle_proxy_open(request), + "proxy.close" => self.handle_proxy_close(request), + "proxy.write" => 
self.handle_proxy_write(request), + "proxy.read" => self.handle_proxy_read(request), + "session.open" => self.handle_session_open(request), + "session.close" => self.handle_session_close(request), + "session.attach" => self.handle_session_attach(request), + "session.resize" => self.handle_session_resize(request), + "session.detach" => self.handle_session_detach(request), + "session.status" => self.handle_session_status(request), + "session.list" => self.handle_session_list(request), + "session.history" => self.handle_session_history(request), + "terminal.open" => self.handle_terminal_open(request), + "terminal.read" => self.handle_terminal_read(request), + "terminal.write" => self.handle_terminal_write(request), + "amux.capture" => self.handle_amux_capture(request), + "amux.wait" => self.handle_amux_wait(request), + "amux.events.read" => self.handle_amux_events_read(request), + "tmux.exec" => self.handle_tmux_exec(request), + _ => rpc_error(request.id.clone(), "method_not_found", "unknown method"), + } + } + + fn handle_proxy_open(&self, request: &Request) -> Response { + let Some(host) = get_string(&request.params, "host") else { + return rpc_error( + request.id.clone(), + "invalid_params", + "proxy.open requires host", + ); + }; + let Some(port) = get_positive_u16(&request.params, "port") else { + return rpc_error( + request.id.clone(), + "invalid_params", + "proxy.open requires port in range 1-65535", + ); + }; + let timeout_ms = + get_non_negative_i64(&request.params, "timeout_ms").unwrap_or(10_000) as u64; + match self.inner.proxies.open(host, port, timeout_ms) { + Ok(stream_id) => rpc_ok(request.id.clone(), json!({ "stream_id": stream_id })), + Err(err) => rpc_error(request.id.clone(), "open_failed", err.to_string()), + } + } + + fn handle_proxy_close(&self, request: &Request) -> Response { + let Some(stream_id) = get_string(&request.params, "stream_id") else { + return rpc_error( + request.id.clone(), + "invalid_params", + "proxy.close requires stream_id", + 
); + }; + match self.inner.proxies.close(stream_id) { + Ok(()) => rpc_ok(request.id.clone(), json!({ "closed": true })), + Err(_) => rpc_error(request.id.clone(), "not_found", "stream not found"), + } + } + + fn handle_proxy_write(&self, request: &Request) -> Response { + let Some(stream_id) = get_string(&request.params, "stream_id") else { + return rpc_error( + request.id.clone(), + "invalid_params", + "proxy.write requires stream_id", + ); + }; + let Some(encoded) = get_string(&request.params, "data_base64") else { + return rpc_error( + request.id.clone(), + "invalid_params", + "proxy.write requires data_base64", + ); + }; + let data = match base64::engine::general_purpose::STANDARD.decode(encoded) { + Ok(value) => value, + Err(_) => { + return rpc_error( + request.id.clone(), + "invalid_params", + "data_base64 must be valid base64", + ); + } + }; + match self.inner.proxies.write(stream_id, &data) { + Ok(written) => rpc_ok(request.id.clone(), json!({ "written": written })), + Err(ProxyError::NotFound) => { + rpc_error(request.id.clone(), "not_found", "stream not found") + } + Err(err) => rpc_error(request.id.clone(), "stream_error", err.to_string()), + } + } + + fn handle_proxy_read(&self, request: &Request) -> Response { + let Some(stream_id) = get_string(&request.params, "stream_id") else { + return rpc_error( + request.id.clone(), + "invalid_params", + "proxy.read requires stream_id", + ); + }; + let max_bytes = get_positive_usize(&request.params, "max_bytes").unwrap_or(32_768); + if max_bytes > 262_144 { + return rpc_error( + request.id.clone(), + "invalid_params", + "max_bytes must be in range 1-262144", + ); + } + let timeout_ms = get_non_negative_i64(&request.params, "timeout_ms").unwrap_or(50) as i32; + match self.inner.proxies.read(stream_id, max_bytes, timeout_ms) { + Ok(read) => rpc_ok( + request.id.clone(), + json!({ + "data_base64": base64::engine::general_purpose::STANDARD.encode(read.data), + "eof": read.eof, + }), + ), + Err(ProxyError::NotFound) 
=> { + rpc_error(request.id.clone(), "not_found", "stream not found") + } + Err(err) => rpc_error(request.id.clone(), "stream_error", err.to_string()), + } + } + + fn handle_session_open(&self, request: &Request) -> Response { + let session_id = get_string(&request.params, "session_id").map(ToString::to_string); + match self.ensure_session(session_id.as_deref()) { + Ok(snapshot) => rpc_ok(request.id.clone(), snapshot_value(snapshot, None, None)), + Err(err) => rpc_error(request.id.clone(), "internal_error", err), + } + } + + fn handle_session_close(&self, request: &Request) -> Response { + let Some(session_id) = get_string(&request.params, "session_id") else { + return rpc_error( + request.id.clone(), + "invalid_params", + "session.close requires session_id", + ); + }; + match self.close_session(session_id) { + Ok(()) => rpc_ok( + request.id.clone(), + json!({ "session_id": session_id, "closed": true }), + ), + Err(_) => rpc_error(request.id.clone(), "not_found", "session not found"), + } + } + + fn handle_session_attach(&self, request: &Request) -> Response { + let Some(session_id) = get_string(&request.params, "session_id") else { + return rpc_error( + request.id.clone(), + "invalid_params", + "session.attach requires session_id", + ); + }; + let Some(attachment_id) = get_string(&request.params, "attachment_id") else { + return rpc_error( + request.id.clone(), + "invalid_params", + "session.attach requires attachment_id", + ); + }; + let Some(cols) = get_positive_u16(&request.params, "cols") else { + return rpc_error( + request.id.clone(), + "invalid_params", + "session.attach requires cols > 0", + ); + }; + let Some(rows) = get_positive_u16(&request.params, "rows") else { + return rpc_error( + request.id.clone(), + "invalid_params", + "session.attach requires rows > 0", + ); + }; + match self.attach_session(session_id, attachment_id, cols, rows) { + Ok(snapshot) => rpc_ok(request.id.clone(), snapshot_value(snapshot, None, None)), + Err(SessionError::NotFound) => 
{ + rpc_error(request.id.clone(), "not_found", "session not found") + } + Err(SessionError::AttachmentNotFound) => { + rpc_error(request.id.clone(), "not_found", "attachment not found") + } + Err(SessionError::InvalidSize) => rpc_error( + request.id.clone(), + "invalid_params", + "cols and rows must be greater than zero", + ), + } + } + + fn handle_session_resize(&self, request: &Request) -> Response { + let Some(session_id) = get_string(&request.params, "session_id") else { + return rpc_error( + request.id.clone(), + "invalid_params", + "session.resize requires session_id", + ); + }; + let Some(attachment_id) = get_string(&request.params, "attachment_id") else { + return rpc_error( + request.id.clone(), + "invalid_params", + "session.resize requires attachment_id", + ); + }; + let Some(cols) = get_positive_u16(&request.params, "cols") else { + return rpc_error( + request.id.clone(), + "invalid_params", + "session.resize requires cols > 0", + ); + }; + let Some(rows) = get_positive_u16(&request.params, "rows") else { + return rpc_error( + request.id.clone(), + "invalid_params", + "session.resize requires rows > 0", + ); + }; + match self.resize_session(session_id, attachment_id, cols, rows) { + Ok(snapshot) => rpc_ok(request.id.clone(), snapshot_value(snapshot, None, None)), + Err(SessionError::NotFound) => { + rpc_error(request.id.clone(), "not_found", "session not found") + } + Err(SessionError::AttachmentNotFound) => { + rpc_error(request.id.clone(), "not_found", "attachment not found") + } + Err(SessionError::InvalidSize) => rpc_error( + request.id.clone(), + "invalid_params", + "cols and rows must be greater than zero", + ), + } + } + + fn handle_session_detach(&self, request: &Request) -> Response { + let Some(session_id) = get_string(&request.params, "session_id") else { + return rpc_error( + request.id.clone(), + "invalid_params", + "session.detach requires session_id", + ); + }; + let Some(attachment_id) = get_string(&request.params, "attachment_id") else 
{ + return rpc_error( + request.id.clone(), + "invalid_params", + "session.detach requires attachment_id", + ); + }; + match self.detach_session(session_id, attachment_id) { + Ok(snapshot) => rpc_ok(request.id.clone(), snapshot_value(snapshot, None, None)), + Err(SessionError::NotFound) => { + rpc_error(request.id.clone(), "not_found", "session not found") + } + Err(SessionError::AttachmentNotFound) => { + rpc_error(request.id.clone(), "not_found", "attachment not found") + } + Err(SessionError::InvalidSize) => rpc_error( + request.id.clone(), + "invalid_params", + "cols and rows must be greater than zero", + ), + } + } + + fn handle_session_status(&self, request: &Request) -> Response { + let Some(session_id) = get_string(&request.params, "session_id") else { + return rpc_error( + request.id.clone(), + "invalid_params", + "session.status requires session_id", + ); + }; + match self.find_session(session_id) { + Some(session) => rpc_ok( + request.id.clone(), + snapshot_value(session.snapshot(), None, None), + ), + None => rpc_error(request.id.clone(), "not_found", "session not found"), + } + } + + fn handle_session_list(&self, request: &Request) -> Response { + let sessions: Vec<SessionListEntry> = self + .sessions() + .into_iter() + .map(|session| session.list_entry()) + .collect(); + rpc_ok(request.id.clone(), json!({ "sessions": sessions })) + } + + fn handle_session_history(&self, request: &Request) -> Response { + let Some(session_id) = get_string(&request.params, "session_id") else { + return rpc_error( + request.id.clone(), + "invalid_params", + "session.history requires session_id", + ); + }; + let Some((_, _, pane)) = self.resolve_active_pane(session_id) else { + return rpc_error( + request.id.clone(), + "not_found", + "terminal session not found", + ); + }; + match pane.capture(true) { + Ok(capture) => { + let history = join_history(&capture.capture.history, &capture.capture.visible); + rpc_ok( + request.id.clone(), + json!({ "session_id": session_id, 
"history": history }), + ) + } + Err(err) => rpc_error(request.id.clone(), "internal_error", err), + } + } + + fn handle_terminal_open(&self, request: &Request) -> Response { + let Some(command) = get_string(&request.params, "command") else { + return rpc_error( + request.id.clone(), + "invalid_params", + "terminal.open requires command", + ); + }; + let Some(cols) = get_positive_u16(&request.params, "cols") else { + return rpc_error( + request.id.clone(), + "invalid_params", + "terminal.open requires cols > 0", + ); + }; + let Some(rows) = get_positive_u16(&request.params, "rows") else { + return rpc_error( + request.id.clone(), + "invalid_params", + "terminal.open requires rows > 0", + ); + }; + let requested_session_id = get_string(&request.params, "session_id"); + + match self.open_terminal(requested_session_id, command, cols, rows) { + Ok((snapshot, attachment_id)) => rpc_ok( + request.id.clone(), + snapshot_value(snapshot, Some(attachment_id), Some(0)), + ), + Err(OpenTerminalError::AlreadyExists) => rpc_error( + request.id.clone(), + "already_exists", + "session already exists", + ), + Err(OpenTerminalError::Other(err)) => { + rpc_error(request.id.clone(), "internal_error", err) + } + } + } + + fn handle_terminal_read(&self, request: &Request) -> Response { + let Some(session_id) = get_string(&request.params, "session_id") else { + return rpc_error( + request.id.clone(), + "invalid_params", + "terminal.read requires session_id", + ); + }; + let Some(offset) = get_non_negative_u64(&request.params, "offset") else { + return rpc_error( + request.id.clone(), + "invalid_params", + "terminal.read requires offset >= 0", + ); + }; + let max_bytes = get_positive_usize(&request.params, "max_bytes").unwrap_or(65_536); + let timeout_ms = get_non_negative_i64(&request.params, "timeout_ms").unwrap_or(0) as i32; + let Some((_, _, pane)) = self.resolve_active_pane(session_id) else { + return rpc_error( + request.id.clone(), + "not_found", + "terminal session not found", + 
); + }; + match pane.read(offset, max_bytes, timeout_ms) { + Ok(read) => rpc_ok( + request.id.clone(), + json!({ + "session_id": session_id, + "offset": read.offset, + "base_offset": read.base_offset, + "truncated": read.truncated, + "eof": read.eof, + "data": base64::engine::general_purpose::STANDARD.encode(read.data), + }), + ), + Err(err) if err == "timeout" => rpc_error( + request.id.clone(), + "deadline_exceeded", + "terminal read timed out", + ), + Err(err) => rpc_error(request.id.clone(), "internal_error", err), + } + } + + fn handle_terminal_write(&self, request: &Request) -> Response { + let Some(session_id) = get_string(&request.params, "session_id") else { + return rpc_error( + request.id.clone(), + "invalid_params", + "terminal.write requires session_id", + ); + }; + let Some(encoded) = get_string(&request.params, "data") else { + return rpc_error( + request.id.clone(), + "invalid_params", + "terminal.write requires data", + ); + }; + let data = match base64::engine::general_purpose::STANDARD.decode(encoded) { + Ok(value) => value, + Err(_) => { + return rpc_error( + request.id.clone(), + "invalid_params", + "terminal.write data must be base64", + ); + } + }; + let Some((_, _, pane)) = self.resolve_active_pane(session_id) else { + return rpc_error( + request.id.clone(), + "not_found", + "terminal session not found", + ); + }; + match pane.write(data.clone()) { + Ok(written) => rpc_ok( + request.id.clone(), + json!({ "session_id": session_id, "written": written }), + ), + Err(err) => rpc_error(request.id.clone(), "internal_error", err), + } + } + + fn handle_amux_capture(&self, request: &Request) -> Response { + let include_history = get_bool(&request.params, "history").unwrap_or(true); + let pane = if let Some(pane_id) = get_string(&request.params, "pane_id") { + self.find_pane_by_id(pane_id) + } else if let Some(session_id) = get_string(&request.params, "session_id") { + self.resolve_active_pane(session_id) + } else { + None + }; + let Some((_session, 
_window_id, pane)) = pane else { + return rpc_error(request.id.clone(), "not_found", "pane not found"); + }; + match pane.capture(include_history) { + Ok(capture) => rpc_ok( + request.id.clone(), + serde_json::to_value(capture).unwrap_or_else(|_| json!({})), + ), + Err(err) => rpc_error(request.id.clone(), "internal_error", err), + } + } + + fn handle_amux_wait(&self, request: &Request) -> Response { + let Some(kind) = get_string(&request.params, "kind") else { + return rpc_error( + request.id.clone(), + "invalid_params", + "amux.wait requires kind", + ); + }; + let timeout_ms = + get_non_negative_i64(&request.params, "timeout_ms").unwrap_or(30_000) as u64; + match kind { + "signal" => { + let Some(name) = get_string(&request.params, "name") else { + return rpc_error( + request.id.clone(), + "invalid_params", + "signal wait requires name", + ); + }; + let after_generation = get_non_negative_u64(&request.params, "after_generation") + .unwrap_or_else(|| self.current_signal_generation(name)); + match self.wait_for_signal( + name, + after_generation, + Duration::from_millis(timeout_ms), + ) { + Ok(generation) => rpc_ok( + request.id.clone(), + json!({ "name": name, "generation": generation }), + ), + Err(err) => rpc_error(request.id.clone(), "deadline_exceeded", err), + } + } + "content" => { + let Some(needle) = get_string(&request.params, "needle") else { + return rpc_error( + request.id.clone(), + "invalid_params", + "content wait requires needle", + ); + }; + let pane = if let Some(pane_id) = get_string(&request.params, "pane_id") { + self.find_pane_by_id(pane_id) + } else if let Some(session_id) = get_string(&request.params, "session_id") { + self.resolve_active_pane(session_id) + } else { + None + }; + let Some((_session, _window, pane)) = pane else { + return rpc_error(request.id.clone(), "not_found", "pane not found"); + }; + match self.wait_for_content(&pane, needle, Duration::from_millis(timeout_ms)) { + Ok(()) => rpc_ok(request.id.clone(), json!({ "matched": 
true })), + Err(err) => rpc_error(request.id.clone(), "deadline_exceeded", err), + } + } + "exited" => { + let pane = if let Some(pane_id) = get_string(&request.params, "pane_id") { + self.find_pane_by_id(pane_id) + } else if let Some(session_id) = get_string(&request.params, "session_id") { + self.resolve_active_pane(session_id) + } else { + None + }; + let Some((_session, _window, pane)) = pane else { + return rpc_error(request.id.clone(), "not_found", "pane not found"); + }; + match self.wait_for_exit(&pane, Duration::from_millis(timeout_ms)) { + Ok(()) => rpc_ok(request.id.clone(), json!({ "exited": true })), + Err(err) => rpc_error(request.id.clone(), "deadline_exceeded", err), + } + } + "busy" => { + let pane = if let Some(pane_id) = get_string(&request.params, "pane_id") { + self.find_pane_by_id(pane_id) + } else if let Some(session_id) = get_string(&request.params, "session_id") { + self.resolve_active_pane(session_id) + } else { + None + }; + let Some((session, _window, pane)) = pane else { + return rpc_error(request.id.clone(), "not_found", "pane not found"); + }; + match self.wait_for_busy( + &session.id, + &pane.pane_id, + &pane, + Duration::from_millis(timeout_ms), + ) { + Ok(()) => rpc_ok(request.id.clone(), json!({ "busy": true })), + Err(err) => rpc_error(request.id.clone(), "deadline_exceeded", err), + } + } + "ready" => { + let pane = if let Some(pane_id) = get_string(&request.params, "pane_id") { + self.find_pane_by_id(pane_id) + } else if let Some(session_id) = get_string(&request.params, "session_id") { + self.resolve_active_pane(session_id) + } else { + None + }; + let Some((_session, _window, pane)) = pane else { + return rpc_error(request.id.clone(), "not_found", "pane not found"); + }; + match self.wait_for_idle(&pane, Duration::from_millis(timeout_ms)) { + Ok(()) => rpc_ok(request.id.clone(), json!({ "ready": true })), + Err(err) => rpc_error(request.id.clone(), "deadline_exceeded", err), + } + } + "idle" => { + let pane = if let 
Some(pane_id) = get_string(&request.params, "pane_id") { + self.find_pane_by_id(pane_id) + } else if let Some(session_id) = get_string(&request.params, "session_id") { + self.resolve_active_pane(session_id) + } else { + None + }; + let Some((_session, _window, pane)) = pane else { + return rpc_error(request.id.clone(), "not_found", "pane not found"); + }; + match self.wait_for_idle(&pane, Duration::from_millis(timeout_ms)) { + Ok(()) => rpc_ok(request.id.clone(), json!({ "idle": true })), + Err(err) => rpc_error(request.id.clone(), "deadline_exceeded", err), + } + } + _ => rpc_error( + request.id.clone(), + "invalid_params", + "unsupported wait kind", + ), + } + } + + fn handle_amux_events_read(&self, request: &Request) -> Response { + let cursor = get_non_negative_u64(&request.params, "cursor").unwrap_or(0); + let timeout_ms = get_non_negative_i64(&request.params, "timeout_ms").unwrap_or(0) as u64; + let filters = get_filters(&request.params); + let session_id = get_string(&request.params, "session_id").map(ToString::to_string); + let pane_id = get_string(&request.params, "pane_id").map(ToString::to_string); + let (next_cursor, events) = self.read_events( + cursor, + Duration::from_millis(timeout_ms), + &filters, + session_id.as_deref(), + pane_id.as_deref(), + ); + rpc_ok( + request.id.clone(), + json!({ + "cursor": next_cursor, + "events": events, + }), + ) + } + + fn handle_tmux_exec(&self, request: &Request) -> Response { + let argv = match request.params.get("argv").and_then(Value::as_array) { + Some(values) => { + let mut argv = Vec::with_capacity(values.len()); + for value in values { + let Some(value) = value.as_str() else { + return rpc_error( + request.id.clone(), + "invalid_params", + "tmux.exec argv entries must be strings", + ); + }; + argv.push(value.to_string()); + } + argv + } + None => { + return rpc_error( + request.id.clone(), + "invalid_params", + "tmux.exec requires argv", + ); + } + }; + match self.tmux_exec(&argv) { + Ok(result) => 
rpc_ok(request.id.clone(), result), + Err(err) => rpc_error(request.id.clone(), "tmux_error", err), + } + } + + fn ensure_session(&self, requested_id: Option<&str>) -> Result<SessionSnapshot, String> { + let session = { + let mut state = self.inner.state.lock().unwrap(); + let session_id = match requested_id { + Some(value) => value.to_string(), + None => allocate_generated_session_id(&mut state), + }; + if let Some(existing) = state.sessions.get(&session_id) { + existing.clone() + } else { + let session = Arc::new(Session::new(session_id.clone())); + state.sessions.insert(session_id, Arc::clone(&session)); + session + } + }; + Ok(session.snapshot()) + } + + fn open_terminal( + &self, + requested_session_id: Option<&str>, + command: &str, + cols: u16, + rows: u16, + ) -> Result<(SessionSnapshot, String), OpenTerminalError> { + let ( + session, + session_id, + attachment_id, + window_id, + pane_id, + effective_cols, + effective_rows, + ) = { + let mut state = self.inner.state.lock().unwrap(); + let session_id = match requested_session_id { + Some(value) => { + if state.sessions.contains_key(value) { + return Err(OpenTerminalError::AlreadyExists); + } + value.to_string() + } + None => allocate_generated_session_id(&mut state), + }; + let attachment_id = format!("att-{}", state.next_attachment_id); + state.next_attachment_id += 1; + let window_id = format!("win-{}", state.next_window_id); + state.next_window_id += 1; + let pane_id = format!("pane-{}", state.next_pane_id); + state.next_pane_id += 1; + + let session = Arc::new(Session::new(session_id.clone())); + session + .attach(attachment_id.clone(), cols, rows) + .map_err(|err| OpenTerminalError::Other(format!("{err:?}")))?; + let (effective_cols, effective_rows) = session.effective_size(); + ( + session, + session_id, + attachment_id, + window_id, + pane_id, + effective_cols, + effective_rows, + ) + }; + + let event_daemon = self.clone(); + let pane_events: EventCallback = + Arc::new(move |event| 
event_daemon.handle_pane_event(event)); + let handle = PaneHandle::spawn( + &session_id, + &pane_id, + command, + effective_cols, + effective_rows, + pane_events, + ) + .map_err(OpenTerminalError::Other)?; + + { + let mut inner = session.inner.lock().unwrap(); + inner.windows.push(Window { + id: window_id.clone(), + name: session_id.clone(), + panes: vec![PaneSlot { + pane_id: pane_id.clone(), + command: command.to_string(), + handle: Arc::clone(&handle), + }], + active_pane: 0, + last_pane: None, + }); + inner.active_window = 0; + } + + { + let mut state = self.inner.state.lock().unwrap(); + if state.sessions.contains_key(&session_id) { + drop(state); + handle.close(); + return Err(OpenTerminalError::AlreadyExists); + } + state + .sessions + .insert(session_id.clone(), Arc::clone(&session)); + } + + let mut state = self.inner.state.lock().unwrap(); + self.emit_event_locked( + &mut state, + "session.open", + json!({ "session_id": session_id }), + ); + self.emit_event_locked( + &mut state, + "window.open", + json!({ "session_id": session_id, "window_id": window_id }), + ); + self.emit_event_locked( + &mut state, + "pane.open", + json!({ "session_id": session_id, "pane_id": pane_id }), + ); + self.inner.state_cv.notify_all(); + Ok((session.snapshot(), attachment_id)) + } + + fn close_session(&self, session_id: &str) -> Result<(), SessionError> { + let session = { + let mut state = self.inner.state.lock().unwrap(); + state + .sessions + .remove(session_id) + .ok_or(SessionError::NotFound)? 
+ }; + let close_events = self.session_close_events(&session); + { + let mut state = self.inner.state.lock().unwrap(); + for (kind, payload) in close_events { + self.emit_event_locked(&mut state, kind, payload); + } + self.emit_event_locked( + &mut state, + "session.close", + json!({ "session_id": session_id }), + ); + self.inner.state_cv.notify_all(); + } + for pane in collect_panes(&session) { + self.tmux_close_pipe(&pane.pane_id); + pane.close(); + } + Ok(()) + } + + fn attach_session( + &self, + session_id: &str, + attachment_id: &str, + cols: u16, + rows: u16, + ) -> Result<SessionSnapshot, SessionError> { + let session = self + .find_session(session_id) + .ok_or(SessionError::NotFound)?; + session.attach(attachment_id.to_string(), cols, rows)?; + self.resize_session_panes(&session); + let snapshot = session.snapshot(); + let mut state = self.inner.state.lock().unwrap(); + self.emit_event_locked( + &mut state, + "session.attach", + json!({ "session_id": session_id, "attachment_id": attachment_id, "cols": cols, "rows": rows }), + ); + self.inner.state_cv.notify_all(); + Ok(snapshot) + } + + fn resize_session( + &self, + session_id: &str, + attachment_id: &str, + cols: u16, + rows: u16, + ) -> Result<SessionSnapshot, SessionError> { + let session = self + .find_session(session_id) + .ok_or(SessionError::NotFound)?; + session.resize_attachment(attachment_id, cols, rows)?; + self.resize_session_panes(&session); + let snapshot = session.snapshot(); + let mut state = self.inner.state.lock().unwrap(); + self.emit_event_locked( + &mut state, + "session.resize", + json!({ "session_id": session_id, "attachment_id": attachment_id, "cols": cols, "rows": rows }), + ); + self.inner.state_cv.notify_all(); + Ok(snapshot) + } + + fn detach_session( + &self, + session_id: &str, + attachment_id: &str, + ) -> Result<SessionSnapshot, SessionError> { + let session = self + .find_session(session_id) + .ok_or(SessionError::NotFound)?; + session.detach(attachment_id)?; + 
self.resize_session_panes(&session); + let snapshot = session.snapshot(); + let mut state = self.inner.state.lock().unwrap(); + self.emit_event_locked( + &mut state, + "session.detach", + json!({ "session_id": session_id, "attachment_id": attachment_id }), + ); + self.inner.state_cv.notify_all(); + Ok(snapshot) + } + + fn resolve_active_pane( + &self, + session_id: &str, + ) -> Option<(Arc<Session>, String, Arc<PaneHandle>)> { + let session = self.find_session(session_id)?; + let inner = session.inner.lock().unwrap(); + let window = inner.windows.get(inner.active_window)?; + let pane = window.panes.get(window.active_pane)?; + Some((session.clone(), window.id.clone(), pane.handle.clone())) + } + + fn resize_session_panes(&self, session: &Arc<Session>) { + let (cols, rows) = session.effective_size(); + if cols == 0 || rows == 0 { + return; + } + for pane in collect_panes(session) { + let _ = pane.resize(cols, rows); + } + } + + fn handle_pane_event(&self, event: PaneRuntimeEvent) { + let mut pipe_write: Option<(String, Arc<Mutex<std::process::ChildStdin>>, Vec<u8>)> = None; + let mut state = self.inner.state.lock().unwrap(); + match event { + PaneRuntimeEvent::Output { + session_id, + pane_id, + data, + } => { + if let Some(pipe) = state.pane_pipes.get(&pane_id) { + pipe_write = Some((pane_id.clone(), Arc::clone(pipe), data.clone())); + } + self.emit_event_locked( + &mut state, + "pane.output", + json!({ "session_id": session_id, "pane_id": pane_id, "len": data.len() }), + ) + } + PaneRuntimeEvent::Busy { + session_id, + pane_id, + } => self.emit_event_locked( + &mut state, + "busy", + json!({ "session_id": session_id, "pane_id": pane_id }), + ), + PaneRuntimeEvent::Idle { + session_id, + pane_id, + } => self.emit_event_locked( + &mut state, + "idle", + json!({ "session_id": session_id, "pane_id": pane_id }), + ), + PaneRuntimeEvent::Exit { + session_id, + pane_id, + } => { + state.pane_pipes.remove(&pane_id); + self.emit_event_locked( + &mut state, + "exited", + 
json!({ "session_id": session_id, "pane_id": pane_id }), + ) + } + } + self.inner.state_cv.notify_all(); + drop(state); + + if let Some((pane_id, pipe, data)) = pipe_write { + let result = { + let mut stdin = pipe.lock().unwrap(); + stdin + .write_all(&data) + .and_then(|_| stdin.flush()) + .map_err(|err| err.to_string()) + }; + if result.is_err() { + let mut state = self.inner.state.lock().unwrap(); + state.pane_pipes.remove(&pane_id); + } + } + } + + fn current_signal_generation(&self, name: &str) -> u64 { + self.inner + .state + .lock() + .unwrap() + .wait_signals + .get(name) + .copied() + .unwrap_or(0) + } + + fn wait_for_signal( + &self, + name: &str, + after_generation: u64, + timeout: Duration, + ) -> Result<u64, String> { + let deadline = Instant::now() + timeout; + let mut state = self.inner.state.lock().unwrap(); + loop { + if let Some(generation) = state.wait_signals.get(name).copied() { + if generation > after_generation { + return Ok(generation); + } + } + let now = Instant::now(); + if now >= deadline { + return Err(format!("wait timed out waiting for '{name}'")); + } + let (next_state, wait_result) = self + .inner + .state_cv + .wait_timeout(state, deadline - now) + .unwrap(); + state = next_state; + if wait_result.timed_out() { + return Err(format!("wait timed out waiting for '{name}'")); + } + } + } + + fn wait_for_content( + &self, + pane: &PaneHandle, + needle: &str, + timeout: Duration, + ) -> Result<(), String> { + let deadline = Instant::now() + timeout; + loop { + let capture = pane.capture(true)?; + let content = join_history(&capture.capture.history, &capture.capture.visible); + if content.contains(needle) { + return Ok(()); + } + let now = Instant::now(); + if now >= deadline { + return Err("content wait timed out".to_string()); + } + let guard = pane.shared.state.lock().unwrap(); + let _ = pane.shared.cv.wait_timeout(guard, deadline - now).unwrap(); + } + } + + fn wait_for_exit(&self, pane: &PaneHandle, timeout: Duration) -> Result<(), 
String> { + let deadline = Instant::now() + timeout; + let mut guard = pane.shared.state.lock().unwrap(); + loop { + if guard.closed { + return Ok(()); + } + let now = Instant::now(); + if now >= deadline { + return Err("exit wait timed out".to_string()); + } + let (next_guard, wait_result) = + pane.shared.cv.wait_timeout(guard, deadline - now).unwrap(); + guard = next_guard; + if wait_result.timed_out() { + return Err("exit wait timed out".to_string()); + } + } + } + + #[cfg(test)] + fn current_event_cursor(&self) -> u64 { + let state = self.inner.state.lock().unwrap(); + state.event_base_cursor + state.events.len() as u64 + } + + fn wait_for_busy( + &self, + _session_id: &str, + _pane_id: &str, + pane: &PaneHandle, + timeout: Duration, + ) -> Result<(), String> { + let deadline = Instant::now() + timeout; + let mut guard = pane.shared.state.lock().unwrap(); + let start_generation = guard.busy_generation; + loop { + if guard.busy || guard.busy_generation != start_generation { + return Ok(()); + } + let now = Instant::now(); + if now >= deadline { + return Err("busy wait timed out".to_string()); + } + let (next_guard, wait_result) = + pane.shared.cv.wait_timeout(guard, deadline - now).unwrap(); + guard = next_guard; + if wait_result.timed_out() { + return Err("busy wait timed out".to_string()); + } + } + } + + fn wait_for_idle(&self, pane: &PaneHandle, timeout: Duration) -> Result<(), String> { + let deadline = Instant::now() + timeout; + let mut guard = pane.shared.state.lock().unwrap(); + loop { + if !guard.busy { + return Ok(()); + } + let now = Instant::now(); + if now >= deadline { + return Err("idle wait timed out".to_string()); + } + let (next_guard, wait_result) = + pane.shared.cv.wait_timeout(guard, deadline - now).unwrap(); + guard = next_guard; + if wait_result.timed_out() { + return Err("idle wait timed out".to_string()); + } + } + } + + fn read_events( + &self, + cursor: u64, + timeout: Duration, + filters: &BTreeSet<String>, + session_id: 
Option<&str>, + pane_id: Option<&str>, + ) -> (u64, Vec<Value>) { + let deadline = Instant::now() + timeout; + let mut state = self.inner.state.lock().unwrap(); + loop { + let filtered = collect_events(&state, cursor, filters, session_id, pane_id); + if !filtered.is_empty() || timeout.is_zero() { + let next_cursor = state.event_base_cursor + state.events.len() as u64; + return (next_cursor, filtered); + } + let now = Instant::now(); + if now >= deadline { + return (cursor, Vec::new()); + } + let (next_state, wait_result) = self + .inner + .state_cv + .wait_timeout(state, deadline - now) + .unwrap(); + state = next_state; + if wait_result.timed_out() { + return (cursor, Vec::new()); + } + } + } + + fn consume_nonce(&self, nonce: &str, expires_at: i64) -> Result<(), String> { + let now = unix_now_secs(); + let mut state = self.inner.state.lock().unwrap(); + state.used_nonces.retain(|_, expiry| *expiry > now); + if state.used_nonces.contains_key(nonce) { + return Err("ticket nonce already used".to_string()); + } + state.used_nonces.insert(nonce.to_string(), expires_at); + Ok(()) + } + + fn emit_event_locked(&self, state: &mut CoreState, kind: &str, payload: Value) { + let cursor = state.next_event_id; + state.next_event_id += 1; + let mut event = json!({ + "cursor": cursor, + "kind": kind, + "time_ms": unix_now(), + }); + if let (Some(event_obj), Some(payload_obj)) = (event.as_object_mut(), payload.as_object()) { + for (key, value) in payload_obj { + event_obj.insert(key.clone(), value.clone()); + } + } + state.events.push_back(event); + while state.events.len() > 4096 { + state.events.pop_front(); + state.event_base_cursor += 1; + } + } +} + +fn request_method_for_frame(frame: &[u8]) -> Option<String> { + serde_json::from_slice::<Value>(trim_crlf(frame)) + .ok() + .and_then(|value| { + value + .get("method") + .and_then(Value::as_str) + .map(ToString::to_string) + }) +} + +fn debug_log(message: &str) { + if env::var_os("CMUX_REMOTE_DEBUG_LOG").is_some() { + 
eprintln!("cmuxd-remote debug: {message}"); + } +} + +impl Daemon { + fn tmux_exec(&self, argv: &[String]) -> Result<Value, String> { + if argv.is_empty() { + return Err("tmux.exec requires a command".to_string()); + } + + let command = argv[0].as_str(); + let raw_args = &argv[1..]; + + match command { + "new-session" | "new" => { + let parsed = parse_tmux_args( + raw_args, + &["-c", "-F", "-n", "-s", "-x", "-y"], + &["-A", "-d", "-P"], + )?; + let requested_session = parsed.value("-s").map(ToString::to_string); + let command_text = tmux_shell_command(parsed.positional(), parsed.value("-c")); + let cols = parsed + .value("-x") + .and_then(|value| value.parse::<u16>().ok()) + .filter(|value| *value > 0) + .unwrap_or(80); + let rows = parsed + .value("-y") + .and_then(|value| value.parse::<u16>().ok()) + .filter(|value| *value > 0) + .unwrap_or(24); + + let session = if parsed.has_flag("-A") { + requested_session + .as_deref() + .and_then(|value| self.find_session(value)) + } else { + None + }; + + let (session, window_index, pane_index) = if let Some(session) = session { + let inner = session.inner.lock().unwrap(); + if inner.windows.is_empty() { + return Err("existing session has no windows".to_string()); + } + ( + session.clone(), + inner.active_window, + inner.windows[inner.active_window].active_pane, + ) + } else { + let (snapshot, attachment_id) = self + .open_terminal(requested_session.as_deref(), &command_text, cols, rows) + .map_err(tmux_open_terminal_error)?; + let session = self + .find_session(&snapshot.session_id) + .ok_or_else(|| "created session disappeared".to_string())?; + let _ = self.detach_session(&snapshot.session_id, &attachment_id); + if let Some(title) = parsed.value("-n") { + if !title.trim().is_empty() { + let mut inner = session.inner.lock().unwrap(); + if let Some(window) = inner.windows.get_mut(0) { + window.name = title.to_string(); + } + } + } + (session, 0, 0) + }; + + let stdout = if parsed.has_flag("-P") { + let context = + 
self.tmux_format_context(&session, window_index, Some(pane_index))?; + tmux_render_format( + parsed.value("-F"), + &context, + &tmux_session_display_id(&session.id), + ) + } else { + String::new() + }; + + Ok(tmux_result( + stdout, + json!({ + "session_id": session.id, + "window_id": tmux_window_display_id(&self.tmux_window_id(&session, window_index)?), + "pane_id": tmux_pane_display_id(&self.tmux_pane_id(&session, window_index, pane_index)?), + }), + )) + } + "new-window" | "neww" => { + let parsed = parse_tmux_args(raw_args, &["-c", "-F", "-n", "-t"], &["-d", "-P"])?; + let session = self.tmux_resolve_session(parsed.value("-t"))?; + let (window_index, pane_index) = self.tmux_create_window( + &session, + parsed.value("-n").map(ToString::to_string), + &tmux_shell_command(parsed.positional(), parsed.value("-c")), + !parsed.has_flag("-d"), + )?; + let stdout = if parsed.has_flag("-P") { + let context = + self.tmux_format_context(&session, window_index, Some(pane_index))?; + let pane_id = self.tmux_pane_id(&session, window_index, pane_index)?; + tmux_render_format( + parsed.value("-F"), + &context, + &tmux_pane_display_id(&pane_id), + ) + } else { + String::new() + }; + Ok(tmux_result( + stdout, + json!({ + "session_id": session.id, + "window_id": tmux_window_display_id(&self.tmux_window_id(&session, window_index)?), + "pane_id": tmux_pane_display_id(&self.tmux_pane_id(&session, window_index, pane_index)?), + }), + )) + } + "split-window" | "splitw" => { + let parsed = parse_tmux_args( + raw_args, + &["-c", "-F", "-l", "-t"], + &["-P", "-b", "-d", "-h", "-v"], + )?; + let target = self.tmux_resolve_pane(parsed.value("-t"))?; + let pane_index = self.tmux_create_pane( + &target.session, + target.window_index, + &tmux_shell_command(parsed.positional(), parsed.value("-c")), + !parsed.has_flag("-d"), + )?; + let stdout = if parsed.has_flag("-P") { + let context = self.tmux_format_context( + &target.session, + target.window_index, + Some(pane_index), + )?; + let pane_id = + 
self.tmux_pane_id(&target.session, target.window_index, pane_index)?; + tmux_render_format( + parsed.value("-F"), + &context, + &tmux_pane_display_id(&pane_id), + ) + } else { + String::new() + }; + Ok(tmux_result( + stdout, + json!({ + "session_id": target.session.id, + "window_id": tmux_window_display_id(&self.tmux_window_id(&target.session, target.window_index)?), + "pane_id": tmux_pane_display_id(&self.tmux_pane_id(&target.session, target.window_index, pane_index)?), + }), + )) + } + "select-window" | "selectw" => { + let parsed = parse_tmux_args(raw_args, &["-t"], &[])?; + let target = self.tmux_resolve_window(parsed.value("-t"))?; + self.tmux_select_window(&target.session, target.window_index)?; + Ok(tmux_result( + String::new(), + json!({ "session_id": target.session.id }), + )) + } + "select-pane" | "selectp" => { + let parsed = parse_tmux_args(raw_args, &["-t"], &[])?; + let target = self.tmux_resolve_pane(parsed.value("-t"))?; + self.tmux_select_pane(&target.session, target.window_index, target.pane_index)?; + Ok(tmux_result( + String::new(), + json!({ + "session_id": target.session.id, + "pane_id": tmux_pane_display_id(&target.pane_id), + }), + )) + } + "kill-window" | "killw" => { + let parsed = parse_tmux_args(raw_args, &["-t"], &[])?; + let target = self.tmux_resolve_window(parsed.value("-t"))?; + self.tmux_kill_window(&target.session, target.window_index)?; + Ok(tmux_result( + String::new(), + json!({ "session_id": target.session.id }), + )) + } + "kill-pane" | "killp" => { + let parsed = parse_tmux_args(raw_args, &["-t"], &[])?; + let target = self.tmux_resolve_pane(parsed.value("-t"))?; + self.tmux_kill_pane(&target.session, target.window_index, target.pane_index)?; + Ok(tmux_result( + String::new(), + json!({ + "session_id": target.session.id, + "pane_id": tmux_pane_display_id(&target.pane_id), + }), + )) + } + "send-keys" | "send" => { + let parsed = parse_tmux_args(raw_args, &["-t"], &["-l"])?; + let target = 
self.tmux_resolve_pane(parsed.value("-t"))?; + let data = tmux_send_keys_bytes(parsed.positional(), parsed.has_flag("-l")); + target + .handle + .write(data) + .map_err(|err| format!("send-keys failed: {err}"))?; + Ok(tmux_result( + String::new(), + json!({ + "session_id": target.session.id, + "pane_id": tmux_pane_display_id(&target.pane_id), + }), + )) + } + "capture-pane" | "capturep" => { + let parsed = parse_tmux_args( + raw_args, + &["-E", "-S", "-t"], + &["-J", "-N", "-p", "-e", "-q"], + )?; + let target = self.tmux_resolve_pane(parsed.value("-t"))?; + let include_history = parsed + .value("-S") + .map(|value| { + value == "-" || value.parse::<i64>().map(|line| line < 0).unwrap_or(false) + }) + .unwrap_or(false); + let capture = target.handle.capture(include_history)?; + let text = tmux_capture_text( + &capture.capture, + include_history, + parsed.value("-S"), + parsed.value("-E"), + ); + if parsed.has_flag("-p") { + Ok(tmux_result( + tmux_line_output(&text), + json!({ + "session_id": target.session.id, + "pane_id": tmux_pane_display_id(&target.pane_id), + }), + )) + } else { + let mut state = self.inner.state.lock().unwrap(); + state.buffers.insert("default".to_string(), text.clone()); + Ok(tmux_result( + String::new(), + json!({ + "buffer": "default", + "bytes": text.len(), + }), + )) + } + } + "display-message" | "display" | "displayp" => { + let parsed = parse_tmux_args(raw_args, &["-F", "-t"], &["-p"])?; + let target = self.tmux_resolve_pane(parsed.value("-t"))?; + let context = self.tmux_format_context( + &target.session, + target.window_index, + Some(target.pane_index), + )?; + let owned_format; + let format = if parsed.positional().is_empty() { + parsed.value("-F") + } else { + owned_format = parsed.positional().join(" "); + Some(owned_format.as_str()) + }; + let rendered = tmux_render_format(format, &context, ""); + Ok(tmux_result( + tmux_line_output(&rendered), + json!({ + "session_id": target.session.id, + "pane_id": 
tmux_pane_display_id(&target.pane_id), + }), + )) + } + "list-windows" | "lsw" => { + let parsed = parse_tmux_args(raw_args, &["-F", "-t"], &[])?; + let session = self.tmux_resolve_session(parsed.value("-t"))?; + let window_count = session.inner.lock().unwrap().windows.len(); + let mut lines = Vec::with_capacity(window_count); + for window_index in 0..window_count { + let context = self.tmux_format_context(&session, window_index, None)?; + let window_id = self.tmux_window_id(&session, window_index)?; + let fallback = + format!("{} {}", window_index, tmux_window_display_id(&window_id)); + lines.push(tmux_render_format(parsed.value("-F"), &context, &fallback)); + } + Ok(tmux_result( + lines.join("\n"), + json!({ "session_id": session.id }), + )) + } + "list-panes" | "lsp" => { + let parsed = parse_tmux_args(raw_args, &["-F", "-t"], &[])?; + let window = self.tmux_resolve_window(parsed.value("-t"))?; + let pane_count = { + let inner = window.session.inner.lock().unwrap(); + inner + .windows + .get(window.window_index) + .map(|value| value.panes.len()) + .ok_or_else(|| "window not found".to_string())? 
+ }; + let mut lines = Vec::with_capacity(pane_count); + for pane_index in 0..pane_count { + let context = self.tmux_format_context( + &window.session, + window.window_index, + Some(pane_index), + )?; + let pane_id = + self.tmux_pane_id(&window.session, window.window_index, pane_index)?; + lines.push(tmux_render_format( + parsed.value("-F"), + &context, + &tmux_pane_display_id(&pane_id), + )); + } + Ok(tmux_result( + lines.join("\n"), + json!({ + "session_id": window.session.id, + "window_id": tmux_window_display_id(&self.tmux_window_id(&window.session, window.window_index)?), + }), + )) + } + "rename-window" | "renamew" => { + let parsed = parse_tmux_args(raw_args, &["-t"], &[])?; + let title = parsed.positional().join(" ").trim().to_string(); + if title.is_empty() { + return Err("rename-window requires a title".to_string()); + } + let target = self.tmux_resolve_window(parsed.value("-t"))?; + let mut inner = target.session.inner.lock().unwrap(); + let window = inner + .windows + .get_mut(target.window_index) + .ok_or_else(|| "window not found".to_string())?; + window.name = title; + Ok(tmux_result( + String::new(), + json!({ "session_id": target.session.id }), + )) + } + "resize-pane" | "resizep" => { + let parsed = + parse_tmux_args(raw_args, &["-t", "-x", "-y"], &["-D", "-L", "-R", "-U"])?; + let target = self.tmux_resolve_pane(parsed.value("-t"))?; + let exact_cols = parsed + .value("-x") + .and_then(|value| value.trim_end_matches('%').parse::<u16>().ok()) + .filter(|value| *value > 0); + let exact_rows = parsed + .value("-y") + .and_then(|value| value.trim_end_matches('%').parse::<u16>().ok()) + .filter(|value| *value > 0); + let amount = exact_cols.or(exact_rows).unwrap_or(5); + let capture = target.handle.capture(false)?; + let mut cols = capture.capture.cols.max(2); + let mut rows = capture.capture.rows.max(1); + if parsed.has_flag("-L") { + cols = cols.saturating_sub(amount).max(2); + } else if parsed.has_flag("-R") { + cols = cols.saturating_add(amount); 
+ } else if parsed.has_flag("-U") { + rows = rows.saturating_sub(amount).max(1); + } else if parsed.has_flag("-D") { + rows = rows.saturating_add(amount); + } else { + if let Some(exact_cols) = exact_cols { + cols = exact_cols.max(2); + } + if let Some(exact_rows) = exact_rows { + rows = exact_rows.max(1); + } + } + target.handle.resize(cols, rows)?; + Ok(tmux_result( + String::new(), + json!({ + "session_id": target.session.id, + "pane_id": tmux_pane_display_id(&target.pane_id), + "cols": cols, + "rows": rows, + }), + )) + } + "wait-for" => { + let parsed = parse_tmux_args(raw_args, &[], &["-S"])?; + let name = parsed + .positional() + .first() + .ok_or_else(|| "wait-for requires a name".to_string())?; + if parsed.has_flag("-S") { + let generation = self.signal_wait(name); + Ok(tmux_result( + String::new(), + json!({ "name": name, "generation": generation }), + )) + } else { + let current_generation = self.current_signal_generation(name); + let generation = if current_generation > 0 { + current_generation + } else { + self.wait_for_signal(name, 0, Duration::from_secs(30))? 
+ }; + Ok(tmux_result( + String::new(), + json!({ "name": name, "generation": generation }), + )) + } + } + "last-pane" => { + let parsed = parse_tmux_args(raw_args, &["-t"], &[])?; + let target = self.tmux_resolve_window(parsed.value("-t"))?; + self.tmux_last_pane(&target.session, target.window_index)?; + Ok(tmux_result( + String::new(), + json!({ "session_id": target.session.id }), + )) + } + "last-window" => { + let parsed = parse_tmux_args(raw_args, &["-t"], &[])?; + let session = self.tmux_resolve_session(parsed.value("-t"))?; + self.tmux_last_window(&session)?; + Ok(tmux_result( + String::new(), + json!({ "session_id": session.id }), + )) + } + "next-window" => { + let parsed = parse_tmux_args(raw_args, &["-t"], &[])?; + let session = self.tmux_resolve_session(parsed.value("-t"))?; + self.tmux_cycle_window(&session, 1)?; + Ok(tmux_result( + String::new(), + json!({ "session_id": session.id }), + )) + } + "previous-window" => { + let parsed = parse_tmux_args(raw_args, &["-t"], &[])?; + let session = self.tmux_resolve_session(parsed.value("-t"))?; + self.tmux_cycle_window(&session, -1)?; + Ok(tmux_result( + String::new(), + json!({ "session_id": session.id }), + )) + } + "has-session" | "has" => { + let parsed = parse_tmux_args(raw_args, &["-t"], &[])?; + let _ = self.tmux_resolve_session(parsed.value("-t"))?; + Ok(tmux_result(String::new(), json!({ "exists": true }))) + } + "set-buffer" => { + let parsed = parse_tmux_args(raw_args, &["-b"], &[])?; + let text = parsed.positional().join(" ").trim().to_string(); + if text.is_empty() { + return Err("set-buffer requires text".to_string()); + } + let name = parsed.value("-b").unwrap_or("default"); + let mut state = self.inner.state.lock().unwrap(); + state.buffers.insert(name.to_string(), text); + Ok(tmux_result(String::new(), json!({ "buffer": name }))) + } + "show-buffer" | "showb" => { + let parsed = parse_tmux_args(raw_args, &["-b"], &[])?; + let name = parsed.value("-b").unwrap_or("default"); + let state = 
self.inner.state.lock().unwrap(); + let buffer = state + .buffers + .get(name) + .ok_or_else(|| format!("buffer not found: {name}"))? + .clone(); + Ok(tmux_result( + tmux_line_output(&buffer), + json!({ "buffer": name }), + )) + } + "save-buffer" | "saveb" => { + let parsed = parse_tmux_args(raw_args, &["-b"], &[])?; + let name = parsed.value("-b").unwrap_or("default"); + let buffer = { + let state = self.inner.state.lock().unwrap(); + state + .buffers + .get(name) + .ok_or_else(|| format!("buffer not found: {name}"))? + .clone() + }; + if let Some(path) = parsed.positional().first() { + fs::write(path, buffer.as_bytes()).map_err(|err| err.to_string())?; + Ok(tmux_result( + String::new(), + json!({ "buffer": name, "path": path }), + )) + } else { + Ok(tmux_result( + tmux_line_output(&buffer), + json!({ "buffer": name }), + )) + } + } + "list-buffers" => { + let state = self.inner.state.lock().unwrap(); + let mut lines = Vec::with_capacity(state.buffers.len()); + for (name, buffer) in &state.buffers { + lines.push(format!("{name}\t{}", buffer.len())); + } + Ok(tmux_result( + lines.join("\n"), + json!({ "count": state.buffers.len() }), + )) + } + "paste-buffer" => { + let parsed = parse_tmux_args(raw_args, &["-b", "-t"], &[])?; + let name = parsed.value("-b").unwrap_or("default"); + let buffer = { + let state = self.inner.state.lock().unwrap(); + state + .buffers + .get(name) + .ok_or_else(|| format!("buffer not found: {name}"))? 
            // (continuation of the "paste-buffer" arm: write the named
            // buffer's bytes into the resolved pane's input)
                        .clone()
                };
                let target = self.tmux_resolve_pane(parsed.value("-t"))?;
                target.handle.write(buffer.into_bytes())?;
                Ok(tmux_result(
                    String::new(),
                    json!({
                        "session_id": target.session.id,
                        "pane_id": tmux_pane_display_id(&target.pane_id),
                    }),
                ))
            }
            // Attach a shell pipeline to a pane's output stream; any existing
            // pipe for the pane is closed first (tmux allows one per pane).
            "pipe-pane" => {
                let parsed = parse_tmux_args(raw_args, &["-t"], &[])?;
                let shell_command = parsed.positional().join(" ").trim().to_string();
                if shell_command.is_empty() {
                    return Err("pipe-pane requires a shell command".to_string());
                }
                let target = self.tmux_resolve_pane(parsed.value("-t"))?;
                self.tmux_close_pipe(&target.pane_id);
                self.tmux_open_pipe(&target.pane_id, &shell_command)?;
                Ok(tmux_result(
                    String::new(),
                    json!({
                        "pane_id": tmux_pane_display_id(&target.pane_id),
                        "status": 0,
                    }),
                ))
            }
            // Search window names (and pane content) for a query string.
            "find-window" => {
                let parsed = parse_tmux_args(raw_args, &["-t"], &[])?;
                let query = parsed.positional().join(" ").trim().to_string();
                let lines = self.tmux_find_windows(parsed.value("-t"), &query)?;
                Ok(tmux_result(String::new(), json!({ "count": lines.len() })))
            }
            // Restart the pane's process in place; with no command, fall back
            // to a login shell.
            "respawn-pane" => {
                let parsed = parse_tmux_args(raw_args, &["-t"], &["-k"])?;
                let target = self.tmux_resolve_pane(parsed.value("-t"))?;
                let command_text = if parsed.positional().is_empty() {
                    "exec ${SHELL:-/bin/sh} -l".to_string()
                } else {
                    parsed.positional().join(" ")
                };
                self.tmux_respawn_pane(
                    &target.session,
                    target.window_index,
                    target.pane_index,
                    &command_text,
                )?;
                Ok(tmux_result(
                    String::new(),
                    json!({
                        "session_id": target.session.id,
                        "pane_id": tmux_pane_display_id(&target.pane_id),
                    }),
                ))
            }
            // Anything not implemented above is reported, not silently ignored.
            _ => Err(format!("unsupported tmux command: {command}")),
        }
    }

    /// The session used when a command carries no `-t` target: the first
    /// live session. Errors when no session exists at all.
    fn tmux_default_session(&self) -> Result<Arc<Session>, String> {
        self.sessions()
            .into_iter()
            .next()
            .ok_or_else(|| "no sessions available".to_string())
    }

    /// Resolve a tmux `-t` session target. Empty/absent falls back to the
    /// default session; a leading `$` (tmux session-id syntax) is stripped;
    /// a "session:window" form uses only the session component.
    fn tmux_resolve_session(&self, target: Option<&str>) -> Result<Arc<Session>, String> {
        let Some(raw_target) = target.map(str::trim).filter(|value| !value.is_empty()) else {
            return self.tmux_default_session();
        };
        if let Some((session_part, _, _)) = tmux_split_target(raw_target) {
            let session_part = session_part.trim_start_matches('$');
            return self
                .find_session(session_part)
                .ok_or_else(|| format!("session not found: {session_part}"));
        }
        let lookup = raw_target.trim_start_matches('$');
        self.find_session(lookup)
            .ok_or_else(|| format!("session not found: {lookup}"))
    }

    /// Resolve a tmux `-t` target to a (session, window index) pair.
    ///
    /// Supported forms: absent/empty (active window of the default session),
    /// `@<window-id>` (searched across every session), "session:window", or
    /// a bare window lookup (id, name, or index) in the default session.
    fn tmux_resolve_window(&self, target: Option<&str>) -> Result<TmuxWindowTarget, String> {
        let Some(raw_target) = target.map(str::trim).filter(|value| !value.is_empty()) else {
            let session = self.tmux_default_session()?;
            let active_window = session.inner.lock().unwrap().active_window;
            return Ok(TmuxWindowTarget {
                session,
                window_index: active_window,
            });
        };

        if let Some(window_id) = raw_target.strip_prefix('@') {
            // `@id` targets are looked up across every session.
            for session in self.sessions() {
                let window_index = {
                    let inner = session.inner.lock().unwrap();
                    inner
                        .windows
                        .iter()
                        .position(|window| window.id == window_id)
                };
                if let Some(window_index) = window_index {
                    return Ok(TmuxWindowTarget {
                        session,
                        window_index,
                    });
                }
            }
            return Err(format!("window not found: {window_id}"));
        }

        let (session, lookup) =
            if let Some((session_part, window_part, _)) = tmux_split_target(raw_target) {
                let session_part = session_part.trim_start_matches('$');
                (
                    self.find_session(session_part)
                        .ok_or_else(|| format!("session not found: {session_part}"))?,
                    window_part,
                )
            } else {
                (self.tmux_default_session()?, raw_target)
            };

        let window_index = self.tmux_window_index_in_session(&session, lookup)?;
        Ok(TmuxWindowTarget {
            session,
            window_index,
        })
    }

    /// Resolve a tmux `-t` target to a concrete pane, cloning the pane's id
    /// and handle so callers can act on it without holding the session lock.
    fn tmux_resolve_pane(&self, target: Option<&str>) -> Result<TmuxPaneTarget, String> {
        let Some(raw_target) = target.map(str::trim).filter(|value| !value.is_empty()) else {
            // No target: active pane of the default session's active window.
            let session = self.tmux_default_session()?;
            let (window_index, pane_index, pane_id, handle) = {
                let inner = session.inner.lock().unwrap();
                let window = inner
                    .windows
                    .get(inner.active_window)
                    .ok_or_else(|| "session has no windows".to_string())?;
                let pane = window
                    .panes
                    .get(window.active_pane)
                    .ok_or_else(|| "window has no panes".to_string())?;
                (
                    inner.active_window,
                    window.active_pane,
                    pane.pane_id.clone(),
                    pane.handle.clone(),
                )
            };
            return Ok(TmuxPaneTarget {
                session,
                window_index,
                pane_index,
                pane_id,
                handle,
            });
        };

        if raw_target.starts_with('@') {
            // Window-id target: use that window's active pane.
            let window = self.tmux_resolve_window(Some(raw_target))?;
            return self.tmux_active_pane_target(window.session, window.window_index);
        }

        if let Some((session_part, window_part, pane_part)) = tmux_split_target(raw_target) {
            // Fully qualified "session:window[.pane]" target.
            let session_part = session_part.trim_start_matches('$');
            let session = self
                .find_session(session_part)
                .ok_or_else(|| format!("session not found: {session_part}"))?;
            let window_index = self.tmux_window_index_in_session(&session, window_part)?;
            if pane_part.is_empty() {
                return self.tmux_active_pane_target(session, window_index);
            }
            return self.tmux_pane_target_in_window(session, window_index, pane_part);
        }

        if let Some((window_part, pane_part)) = raw_target.split_once('.') {
            // "window.pane" inside the default session.
            let session = self.tmux_default_session()?;
            let window_index = self.tmux_window_index_in_session(&session, window_part)?;
            return self.tmux_pane_target_in_window(session, window_index, pane_part);
        }

        // Bare lookup: first try the default session's active window.
        let session = self.tmux_default_session()?;
        let active_window = session.inner.lock().unwrap().active_window;
        if let Ok(target) =
            self.tmux_pane_target_in_window(session.clone(), active_window, raw_target)
        {
            return Ok(target);
        }

        if !raw_target.starts_with('%') {
            return Err(format!("pane not found: {raw_target}"));
        }
        // `%id` targets are scanned across every window of every session.
        let lookup = raw_target.trim_start_matches('%');
        for session in self.sessions() {
            let found = {
                let inner = session.inner.lock().unwrap();
                let
            // (continuation of tmux_resolve_pane's `%id` scan)
            mut found = None;
                for (window_index, window) in inner.windows.iter().enumerate() {
                    if let Some(pane_index) = window
                        .panes
                        .iter()
                        .enumerate()
                        .position(|(pane_index, pane)| tmux_pane_matches(pane_index, pane, lookup))
                    {
                        let pane = &window.panes[pane_index];
                        found = Some((
                            window_index,
                            pane_index,
                            pane.pane_id.clone(),
                            pane.handle.clone(),
                        ));
                        break;
                    }
                }
                found
            };
            if let Some((window_index, pane_index, pane_id, handle)) = found {
                return Ok(TmuxPaneTarget {
                    session,
                    window_index,
                    pane_index,
                    pane_id,
                    handle,
                });
            }
        }
        Err(format!("pane not found: {lookup}"))
    }

    /// Map a window lookup (id, name, or numeric index; empty means "0") to
    /// its position within `session`.
    fn tmux_window_index_in_session(
        &self,
        session: &Arc<Session>,
        lookup: &str,
    ) -> Result<usize, String> {
        let lookup = if lookup.is_empty() { "0" } else { lookup };
        session
            .inner
            .lock()
            .unwrap()
            .windows
            .iter()
            .enumerate()
            .position(|(index, window)| tmux_window_matches(index, window, lookup))
            .ok_or_else(|| format!("window not found: {lookup}"))
    }

    /// Build a TmuxPaneTarget for the active pane of `window_index`.
    fn tmux_active_pane_target(
        &self,
        session: Arc<Session>,
        window_index: usize,
    ) -> Result<TmuxPaneTarget, String> {
        let (pane_index, pane_id, handle) = {
            let inner = session.inner.lock().unwrap();
            let window = inner
                .windows
                .get(window_index)
                .ok_or_else(|| "window not found".to_string())?;
            let pane = window
                .panes
                .get(window.active_pane)
                .ok_or_else(|| "window has no panes".to_string())?;
            (
                window.active_pane,
                pane.pane_id.clone(),
                pane.handle.clone(),
            )
        };
        Ok(TmuxPaneTarget {
            session,
            window_index,
            pane_index,
            pane_id,
            handle,
        })
    }

    /// Build a TmuxPaneTarget for the pane matching `lookup` inside the
    /// given window.
    fn tmux_pane_target_in_window(
        &self,
        session: Arc<Session>,
        window_index: usize,
        lookup: &str,
    ) -> Result<TmuxPaneTarget, String> {
        let (pane_index, pane_id, handle) = {
            let inner = session.inner.lock().unwrap();
            let window = inner
                .windows
                .get(window_index)
                .ok_or_else(|| "window not found".to_string())?;
            let pane_index = window
                .panes
                .iter()
                .enumerate()
                .position(|(index, pane)| tmux_pane_matches(index, pane, lookup))
                .ok_or_else(|| format!("pane not found: {lookup}"))?;
            let pane = &window.panes[pane_index];
            (pane_index, pane.pane_id.clone(), pane.handle.clone())
        };
        Ok(TmuxPaneTarget {
            session,
            window_index,
            pane_index,
            pane_id,
            handle,
        })
    }

    /// Spawn a new window with one pane in `session`, emit window.open and
    /// pane.open events, and return `(window_index, pane_index)`.
    fn tmux_create_window(
        &self,
        session: &Arc<Session>,
        name: Option<String>,
        command: &str,
        focus: bool,
    ) -> Result<(usize, usize), String> {
        let (cols, rows) = tmux_size_or_default(session.effective_size());
        // Allocate ids under the daemon state lock, then release it before
        // spawning the PTY so the spawn doesn't block other daemon work.
        let (window_id, pane_id) = {
            let mut state = self.inner.state.lock().unwrap();
            let window_id = format!("win-{}", state.next_window_id);
            state.next_window_id += 1;
            let pane_id = format!("pane-{}", state.next_pane_id);
            state.next_pane_id += 1;
            (window_id, pane_id)
        };

        let event_daemon = self.clone();
        let pane_events: EventCallback =
            Arc::new(move |event| event_daemon.handle_pane_event(event));
        let handle = PaneHandle::spawn(&session.id, &pane_id, command, cols, rows, pane_events)?;

        let window_index = {
            let mut inner = session.inner.lock().unwrap();
            let window_index = inner.windows.len();
            inner.windows.push(Window {
                id: window_id.clone(),
                name: name.unwrap_or_else(|| format!("window-{}", window_index)),
                panes: vec![PaneSlot {
                    pane_id: pane_id.clone(),
                    command: command.to_string(),
                    handle,
                }],
                active_pane: 0,
                last_pane: None,
            });
            // The very first window always becomes active, even without focus.
            if focus || window_index == 0 {
                if window_index > 0 {
                    inner.last_window = Some(inner.active_window);
                }
                inner.active_window = window_index;
            }
            window_index
        };

        let mut state = self.inner.state.lock().unwrap();
        self.emit_event_locked(
            &mut state,
            "window.open",
            json!({ "session_id": session.id, "window_id": window_id }),
        );
        self.emit_event_locked(
            &mut state,
            "pane.open",
            json!({ "session_id": session.id, "pane_id": pane_id }),
        );
        self.inner.state_cv.notify_all();
        Ok((window_index, 0))
    }
    /// Spawn an additional pane in an existing window (tmux `split-window`),
    /// emit a pane.open event, and return the new pane's index.
    fn tmux_create_pane(
        &self,
        session: &Arc<Session>,
        window_index: usize,
        command: &str,
        focus: bool,
    ) -> Result<usize, String> {
        let (cols, rows) = tmux_size_or_default(session.effective_size());
        // Allocate the pane id under the state lock, release before spawning.
        let pane_id = {
            let mut state = self.inner.state.lock().unwrap();
            let pane_id = format!("pane-{}", state.next_pane_id);
            state.next_pane_id += 1;
            pane_id
        };

        let event_daemon = self.clone();
        let pane_events: EventCallback =
            Arc::new(move |event| event_daemon.handle_pane_event(event));
        let handle = PaneHandle::spawn(&session.id, &pane_id, command, cols, rows, pane_events)?;

        let pane_index = {
            let mut inner = session.inner.lock().unwrap();
            let window = inner
                .windows
                .get_mut(window_index)
                .ok_or_else(|| "window not found".to_string())?;
            let pane_index = window.panes.len();
            window.panes.push(PaneSlot {
                pane_id: pane_id.clone(),
                command: command.to_string(),
                handle,
            });
            if focus {
                window.last_pane = Some(window.active_pane);
                window.active_pane = pane_index;
            }
            pane_index
        };

        let mut state = self.inner.state.lock().unwrap();
        self.emit_event_locked(
            &mut state,
            "pane.open",
            json!({ "session_id": session.id, "pane_id": pane_id }),
        );
        self.inner.state_cv.notify_all();
        Ok(pane_index)
    }

    /// Make `window_index` the session's active window, remembering the
    /// previous one so `last-window` can return to it.
    fn tmux_select_window(
        &self,
        session: &Arc<Session>,
        window_index: usize,
    ) -> Result<(), String> {
        let mut inner = session.inner.lock().unwrap();
        if window_index >= inner.windows.len() {
            return Err("window not found".to_string());
        }
        if inner.active_window != window_index {
            inner.last_window = Some(inner.active_window);
            inner.active_window = window_index;
        }
        Ok(())
    }

    /// Make `pane_index` the window's active pane, remembering the previous
    /// one so `last-pane` can return to it.
    fn tmux_select_pane(
        &self,
        session: &Arc<Session>,
        window_index: usize,
        pane_index: usize,
    ) -> Result<(), String> {
        let mut inner = session.inner.lock().unwrap();
        let window = inner
            .windows
            .get_mut(window_index)
            .ok_or_else(|| "window not found".to_string())?;
        if pane_index >= window.panes.len() {
            return Err("pane not found".to_string());
        }
        if window.active_pane != pane_index {
            window.last_pane = Some(window.active_pane);
            window.active_pane = pane_index;
        }
        Ok(())
    }

    /// Remove a window: collect close events under the session lock, emit
    /// them, then close pipes and pane handles outside the locks; if the
    /// session has no windows left afterwards it is closed entirely.
    fn tmux_kill_window(&self, session: &Arc<Session>, window_index: usize) -> Result<(), String> {
        let (handles, pane_ids, close_events) = {
            let mut inner = session.inner.lock().unwrap();
            if window_index >= inner.windows.len() {
                return Err("window not found".to_string());
            }
            let window = inner.windows.remove(window_index);
            let window_id = window.id.clone();
            let pane_ids = window
                .panes
                .iter()
                .map(|pane| pane.pane_id.clone())
                .collect::<Vec<_>>();
            // Panes close before their window, matching event ordering used
            // elsewhere (see session_close_events).
            let mut close_events = Vec::with_capacity(window.panes.len() + 1);
            for pane in &window.panes {
                close_events.push((
                    "pane.close",
                    json!({ "session_id": session.id, "pane_id": pane.pane_id }),
                ));
            }
            close_events.push((
                "window.close",
                json!({ "session_id": session.id, "window_id": window_id }),
            ));
            if inner.windows.is_empty() {
                inner.active_window = 0;
                inner.last_window = None;
            } else {
                // Shift the active/last indices to account for the removal.
                inner.active_window =
                    rebase_index(inner.active_window, window_index, inner.windows.len());
                inner.last_window =
                    rebase_optional_index(inner.last_window, window_index, inner.windows.len());
            }
            (
                window
                    .panes
                    .into_iter()
                    .map(|pane| pane.handle)
                    .collect::<Vec<_>>(),
                pane_ids,
                close_events,
            )
        };
        {
            let mut state = self.inner.state.lock().unwrap();
            for (kind, payload) in close_events {
                self.emit_event_locked(&mut state, kind, payload);
            }
            self.inner.state_cv.notify_all();
        }
        for pane_id in pane_ids {
            self.tmux_close_pipe(&pane_id);
        }
        for handle in handles {
            handle.close();
        }
        // A session with no remaining windows is torn down.
        if session.inner.lock().unwrap().windows.is_empty() {
            let _ = self.close_session(&session.id);
        }
        Ok(())
    }

    /// Remove a single pane; killing the last pane of a window kills the
    /// whole window (via tmux_kill_window).
    fn tmux_kill_pane(
        &self,
        session: &Arc<Session>,
        window_index: usize,
        pane_index: usize,
    ) -> Result<(), String> {
        let (handle,
        // (continuation of tmux_kill_pane)
        pane_id, empty_after_remove) = {
            let mut inner = session.inner.lock().unwrap();
            let window = inner
                .windows
                .get_mut(window_index)
                .ok_or_else(|| "window not found".to_string())?;
            if pane_index >= window.panes.len() {
                return Err("pane not found".to_string());
            }
            let pane = window.panes.remove(pane_index);
            if window.panes.is_empty() {
                (pane.handle, pane.pane_id, true)
            } else {
                // Shift the active/last pane indices to account for removal.
                window.active_pane =
                    rebase_index(window.active_pane, pane_index, window.panes.len());
                window.last_pane =
                    rebase_optional_index(window.last_pane, pane_index, window.panes.len());
                (pane.handle, pane.pane_id, false)
            }
        };
        {
            let mut state = self.inner.state.lock().unwrap();
            self.emit_event_locked(
                &mut state,
                "pane.close",
                json!({ "session_id": session.id, "pane_id": pane_id }),
            );
            self.inner.state_cv.notify_all();
        }
        self.tmux_close_pipe(&pane_id);
        handle.close();
        if empty_after_remove {
            // Last pane gone: remove the now-empty window too.
            self.tmux_kill_window(session, window_index)?;
        }
        Ok(())
    }

    /// Swap back to the previously active window (tmux `last-window`); a
    /// stale or missing "last" index is silently ignored.
    fn tmux_last_window(&self, session: &Arc<Session>) -> Result<(), String> {
        let mut inner = session.inner.lock().unwrap();
        if let Some(last_window) = inner
            .last_window
            .filter(|value| *value < inner.windows.len())
        {
            let current = inner.active_window;
            inner.active_window = last_window;
            inner.last_window = Some(current);
        }
        Ok(())
    }

    /// Move the active window by `delta`, wrapping around the window list
    /// (tmux `next-window` / `previous-window`).
    fn tmux_cycle_window(&self, session: &Arc<Session>, delta: isize) -> Result<(), String> {
        let mut inner = session.inner.lock().unwrap();
        if inner.windows.is_empty() {
            return Err("session has no windows".to_string());
        }
        let len = inner.windows.len() as isize;
        inner.last_window = Some(inner.active_window);
        // rem_euclid keeps the index non-negative for negative deltas.
        inner.active_window = ((inner.active_window as isize + delta).rem_euclid(len)) as usize;
        Ok(())
    }

    /// Swap back to the previously active pane (tmux `last-pane`); a stale
    /// or missing "last" index is silently ignored.
    fn tmux_last_pane(&self, session: &Arc<Session>, window_index: usize) -> Result<(), String> {
        let mut inner = session.inner.lock().unwrap();
        let window = inner
            .windows
            .get_mut(window_index)
            .ok_or_else(|| "window not found".to_string())?;
        if let Some(last_pane) = window.last_pane.filter(|value| *value < window.panes.len()) {
            let current = window.active_pane;
            window.active_pane = last_pane;
            window.last_pane = Some(current);
        }
        Ok(())
    }

    /// Restart a pane's process in place (tmux `respawn-pane`): spawn the
    /// new process first, then swap the handle under the lock and close the
    /// old one outside it.
    fn tmux_respawn_pane(
        &self,
        session: &Arc<Session>,
        window_index: usize,
        pane_index: usize,
        command: &str,
    ) -> Result<(), String> {
        let pane_id = self.tmux_pane_id(session, window_index, pane_index)?;
        let (cols, rows) = tmux_size_or_default(session.effective_size());
        let event_daemon = self.clone();
        let pane_events: EventCallback =
            Arc::new(move |event| event_daemon.handle_pane_event(event));
        let handle = PaneHandle::spawn(&session.id, &pane_id, command, cols, rows, pane_events)?;
        self.tmux_close_pipe(&pane_id);
        let old_handle = {
            let mut inner = session.inner.lock().unwrap();
            let window = inner
                .windows
                .get_mut(window_index)
                .ok_or_else(|| "window not found".to_string())?;
            let pane = window
                .panes
                .get_mut(pane_index)
                .ok_or_else(|| "pane not found".to_string())?;
            pane.command = command.to_string();
            std::mem::replace(&mut pane.handle, handle)
        };
        old_handle.close();
        Ok(())
    }

    /// List windows whose name or captured pane content contains `query`
    /// (tmux `find-window`). With `target_session`, only that session is
    /// searched; otherwise all sessions are.
    fn tmux_find_windows(
        &self,
        target_session: Option<&str>,
        query: &str,
    ) -> Result<Vec<String>, String> {
        let sessions = if let Some(target_session) = target_session {
            vec![self.tmux_resolve_session(Some(target_session))?]
        // (continuation of tmux_find_windows)
        } else {
            self.sessions()
        };

        let mut lines = Vec::new();
        for session in sessions {
            let inner = session.inner.lock().unwrap();
            for window in &inner.windows {
                let mut matched = query.is_empty() || window.name.contains(query);
                if !matched {
                    // Fall back to searching each pane's captured content,
                    // including scrollback history.
                    for pane in &window.panes {
                        if let Ok(capture) = pane.handle.capture(true) {
                            let content =
                                join_history(&capture.capture.history, &capture.capture.visible);
                            if content.contains(query) {
                                matched = true;
                                break;
                            }
                        }
                    }
                }
                if matched {
                    lines.push(format!(
                        "{} {}",
                        tmux_window_display_id(&window.id),
                        window.name
                    ));
                }
            }
        }
        Ok(lines)
    }

    /// Start a `pipe-pane` child process and register its stdin under the
    /// pane's id so pane output can be forwarded to it.
    fn tmux_open_pipe(&self, pane_id: &str, shell_command: &str) -> Result<(), String> {
        let mut child = Command::new("/bin/sh")
            .arg("-lc")
            .arg(shell_command)
            .stdin(Stdio::piped())
            .stdout(Stdio::null())
            .stderr(Stdio::null())
            .spawn()
            .map_err(|err| err.to_string())?;
        let stdin = child
            .stdin
            .take()
            .ok_or_else(|| "pipe-pane child missing stdin".to_string())?;
        // Reap the child in the background so it never becomes a zombie.
        thread::spawn(move || {
            let _ = child.wait();
        });

        let mut state = self.inner.state.lock().unwrap();
        state
            .pane_pipes
            .insert(pane_id.to_string(), Arc::new(Mutex::new(stdin)));
        Ok(())
    }

    /// Drop a pane's pipe entry; dropping the stored stdin handle closes the
    /// child's input.
    fn tmux_close_pipe(&self, pane_id: &str) {
        let mut state = self.inner.state.lock().unwrap();
        state.pane_pipes.remove(pane_id);
    }

    /// Look up the internal window id for `window_index`.
    fn tmux_window_id(
        &self,
        session: &Arc<Session>,
        window_index: usize,
    ) -> Result<String, String> {
        session
            .inner
            .lock()
            .unwrap()
            .windows
            .get(window_index)
            .map(|window| window.id.clone())
            .ok_or_else(|| "window not found".to_string())
    }

    /// Look up the internal pane id for `(window_index, pane_index)`.
    fn tmux_pane_id(
        &self,
        session: &Arc<Session>,
        window_index: usize,
        pane_index: usize,
    ) -> Result<String, String> {
        session
            .inner
            .lock()
            .unwrap()
            .windows
            .get(window_index)
            .and_then(|window| window.panes.get(pane_index))
            .map(|pane| pane.pane_id.clone())
            .ok_or_else(|| "pane not found".to_string())
    }

    /// Build the `#{...}` substitution map used by tmux format strings for a
    /// window and, when `pane_index` is given, one of its panes.
    fn tmux_format_context(
        &self,
        session: &Arc<Session>,
        window_index: usize,
        pane_index: Option<usize>,
    ) -> Result<BTreeMap<String, String>, String> {
        let inner = session.inner.lock().unwrap();
        let window = inner
            .windows
            .get(window_index)
            .ok_or_else(|| "window not found".to_string())?;
        let mut context = BTreeMap::new();
        // The session id doubles as the session name in this shim.
        context.insert("session_name".to_string(), session.id.clone());
        context.insert(
            "session_id".to_string(),
            tmux_session_display_id(&session.id),
        );
        context.insert("window_id".to_string(), tmux_window_display_id(&window.id));
        context.insert("window_name".to_string(), window.name.clone());
        context.insert("window_index".to_string(), window_index.to_string());
        context.insert(
            "window_active".to_string(),
            if inner.active_window == window_index {
                "1"
            } else {
                "0"
            }
            .to_string(),
        );
        if let Some(pane_index) = pane_index {
            let pane = window
                .panes
                .get(pane_index)
                .ok_or_else(|| "pane not found".to_string())?;
            // Title/pwd come from the live pane state tracked by the handle.
            let state = pane.handle.shared.state.lock().unwrap();
            context.insert("pane_id".to_string(), tmux_pane_display_id(&pane.pane_id));
            context.insert("pane_index".to_string(), pane_index.to_string());
            context.insert(
                "pane_active".to_string(),
                if window.active_pane == pane_index {
                    "1"
                } else {
                    "0"
                }
                .to_string(),
            );
            context.insert("pane_title".to_string(), state.title.clone());
            context.insert("pane_current_path".to_string(), state.pwd.clone());
            context.insert(
                "pane_current_command".to_string(),
                tmux_command_name(&pane.command),
            );
        }
        Ok(context)
    }

    /// Collect pane.close/window.close event payloads for every window and
    /// pane of `session`, panes before their window.
    fn session_close_events(&self, session: &Arc<Session>) -> Vec<(&'static str, Value)> {
        let inner = session.inner.lock().unwrap();
        let mut events = Vec::new();
        for window in &inner.windows {
            for pane in &window.panes {
                events.push((
                    "pane.close",
                    json!({ "session_id": session.id, "pane_id": pane.pane_id }),
                ));
            }
            events.push((
                "window.close",
                json!({ "session_id": session.id,
"window_id": window.id }), + )); + } + events + } +} + +#[derive(Debug)] +enum OpenTerminalError { + AlreadyExists, + Other(String), +} + +struct TmuxWindowTarget { + session: Arc<Session>, + window_index: usize, +} + +struct TmuxPaneTarget { + session: Arc<Session>, + window_index: usize, + pane_index: usize, + pane_id: String, + handle: Arc<PaneHandle>, +} + +#[derive(Default)] +struct ParsedTmuxArgs { + flags: BTreeSet<String>, + values: BTreeMap<String, String>, + positional: Vec<String>, +} + +impl ParsedTmuxArgs { + fn has_flag(&self, flag: &str) -> bool { + self.flags.contains(flag) + } + + fn value(&self, flag: &str) -> Option<&str> { + self.values.get(flag).map(String::as_str) + } + + fn positional(&self) -> &[String] { + &self.positional + } +} + +fn parse_tmux_args( + args: &[String], + value_flags: &[&str], + bool_flags: &[&str], +) -> Result<ParsedTmuxArgs, String> { + let value_flags: BTreeSet<&str> = value_flags.iter().copied().collect(); + let bool_flags: BTreeSet<&str> = bool_flags.iter().copied().collect(); + let mut parsed = ParsedTmuxArgs::default(); + let mut idx = 0; + while idx < args.len() { + let arg = args[idx].as_str(); + if arg == "--" { + parsed.positional.extend(args[idx + 1..].iter().cloned()); + break; + } + if value_flags.contains(arg) { + idx += 1; + if idx >= args.len() { + return Err(format!("{arg} requires a value")); + } + parsed.values.insert(arg.to_string(), args[idx].clone()); + idx += 1; + continue; + } + if bool_flags.contains(arg) { + parsed.flags.insert(arg.to_string()); + idx += 1; + continue; + } + parsed.positional.extend(args[idx..].iter().cloned()); + break; + } + Ok(parsed) +} + +fn tmux_open_terminal_error(err: OpenTerminalError) -> String { + match err { + OpenTerminalError::AlreadyExists => "session already exists".to_string(), + OpenTerminalError::Other(err) => err, + } +} + +fn tmux_split_target(raw: &str) -> Option<(&str, &str, &str)> { + let (session, rest) = raw.split_once(':')?; + let (window, pane) = 
rest.split_once('.').unwrap_or((rest, "")); + Some((session, window, pane)) +} + +fn tmux_session_display_id(session_id: &str) -> String { + format!("${session_id}") +} + +fn tmux_window_display_id(window_id: &str) -> String { + format!("@{window_id}") +} + +fn tmux_pane_display_id(pane_id: &str) -> String { + format!("%{pane_id}") +} + +fn tmux_window_matches(index: usize, window: &Window, lookup: &str) -> bool { + window.id == lookup || window.name == lookup || lookup.parse::<usize>().ok() == Some(index) +} + +fn tmux_pane_matches(index: usize, pane: &PaneSlot, lookup: &str) -> bool { + pane.pane_id == lookup + || tmux_command_name(&pane.command) == lookup + || lookup.parse::<usize>().ok() == Some(index) +} + +fn tmux_size_or_default((cols, rows): (u16, u16)) -> (u16, u16) { + let cols = if cols == 0 { 80 } else { cols.max(2) }; + let rows = if rows == 0 { 24 } else { rows.max(1) }; + (cols, rows) +} + +fn tmux_shell_command(tokens: &[String], cwd: Option<&str>) -> String { + let base = if tokens.is_empty() { + "exec ${SHELL:-/bin/sh} -l".to_string() + } else { + tokens.join(" ") + }; + match cwd { + Some(cwd) if !cwd.trim().is_empty() => format!("cd {} && {base}", tmux_shell_quote(cwd)), + _ => base, + } +} + +fn tmux_shell_quote(value: &str) -> String { + format!("'{}'", value.replace('\'', r"'\''")) +} + +fn tmux_command_name(command: &str) -> String { + command + .split_whitespace() + .next() + .unwrap_or_default() + .rsplit('/') + .next() + .unwrap_or_default() + .to_string() +} + +fn tmux_send_keys_bytes(tokens: &[String], literal: bool) -> Vec<u8> { + let mut out = Vec::new(); + for token in tokens { + if literal { + out.extend_from_slice(token.as_bytes()); + continue; + } + match token.as_str() { + "Enter" | "C-m" => out.push(b'\r'), + "Tab" => out.push(b'\t'), + "Space" => out.push(b' '), + "Escape" | "Esc" => out.push(0x1b), + "BSpace" | "Backspace" => out.push(0x7f), + "C-c" => out.push(0x03), + "C-d" => out.push(0x04), + other => 
out.extend_from_slice(other.as_bytes()), + } + } + out +} + +fn tmux_capture_text( + capture: &crate::capture::TerminalCapture, + include_history: bool, + start: Option<&str>, + end: Option<&str>, +) -> String { + let source = if include_history { + join_history(&capture.history, &capture.visible) + } else { + capture.visible.clone() + }; + let mut lines: Vec<&str> = source.lines().collect(); + if source.ends_with('\n') { + lines.push(""); + } + if lines.is_empty() { + return String::new(); + } + let start_index = tmux_line_index(start, lines.len(), 0); + let end_index = tmux_line_index(end, lines.len(), lines.len().saturating_sub(1)); + if start_index > end_index || start_index >= lines.len() { + return String::new(); + } + lines[start_index..=end_index.min(lines.len() - 1)].join("\n") +} + +fn tmux_line_index(value: Option<&str>, line_count: usize, default: usize) -> usize { + let Some(value) = value else { + return default; + }; + if value == "-" { + return if default == 0 { + 0 + } else { + line_count.saturating_sub(1) + }; + } + match value.parse::<i64>() { + Ok(number) if number < 0 => line_count.saturating_sub(number.unsigned_abs() as usize), + Ok(number) => number as usize, + Err(_) => default, + } +} + +fn tmux_render_format( + format: Option<&str>, + context: &BTreeMap<String, String>, + fallback: &str, +) -> String { + let Some(format) = format.filter(|value| !value.is_empty()) else { + return fallback.to_string(); + }; + let mut rendered = format.to_string(); + for (key, value) in context { + rendered = rendered.replace(&format!("#{{{key}}}"), value); + } + rendered +} + +fn tmux_result(stdout: String, extra: Value) -> Value { + let mut result = extra.as_object().cloned().unwrap_or_default(); + result.insert("stdout".to_string(), json!(stdout)); + Value::Object(result) +} + +fn tmux_line_output(value: &str) -> String { + if value.is_empty() { + String::new() + } else if value.ends_with('\n') { + value.to_string() + } else { + format!("{value}\n") + } +} 
+ +#[derive(Default)] +struct DirectAuthorizer { + capabilities: BTreeSet<String>, + claimed_session_id: String, + claimed_attachment_id: String, + active_session_id: String, + active_attachment_id: String, + grant: RequestGrant, + used: bool, +} + +#[derive(Default, PartialEq, Eq)] +enum RequestGrant { + #[default] + None, + Open, + Attach, +} + +impl DirectAuthorizer { + fn new(claims: TicketClaims) -> Self { + Self { + capabilities: claims.capabilities.into_iter().collect(), + claimed_session_id: claims.session_id, + claimed_attachment_id: claims.attachment_id, + active_session_id: String::new(), + active_attachment_id: String::new(), + grant: RequestGrant::None, + used: false, + } + } + + fn handle(&mut self, daemon: &Daemon, request: &Request) -> Response { + if let Some(response) = self.authorize(request) { + return with_id(response, request.id.clone()); + } + let response = daemon.handle_request(request); + if response.ok { + self.observe(request, &response); + } + response + } + + fn authorize(&self, request: &Request) -> Option<Response> { + match request.method.as_str() { + "hello" | "ping" => None, + "terminal.open" => { + if !self.capabilities.contains("session.open") { + Some(rpc_error( + None, + "unauthorized", + "ticket missing session.open capability", + )) + } else if self.used { + Some(rpc_error( + None, + "unauthorized", + "ticket is already bound to a terminal session", + )) + } else { + None + } + } + "session.attach" => { + if !self.capabilities.contains("session.attach") { + return Some(rpc_error( + None, + "unauthorized", + "ticket missing session.attach capability", + )); + } + let session_id = get_string(&request.params, "session_id").unwrap_or_default(); + let attachment_id = + get_string(&request.params, "attachment_id").unwrap_or_default(); + if session_id.is_empty() || attachment_id.is_empty() { + return None; + } + let Some((allowed_session, allowed_attachment)) = self.allowed_scope() else { + return Some(rpc_error( + None, + 
"unauthorized", + "direct session.attach tickets require session and attachment scope", + )); + }; + if allowed_session != session_id || allowed_attachment != attachment_id { + Some(rpc_error( + None, + "unauthorized", + "request exceeds direct ticket session scope", + )) + } else { + None + } + } + "terminal.read" | "terminal.write" | "session.status" | "session.close" => { + self.authorize_established(request, false) + } + "session.resize" | "session.detach" => self.authorize_established(request, true), + _ => Some(rpc_error( + None, + "unauthorized", + "request is not allowed for this direct ticket", + )), + } + } + + fn authorize_established(&self, request: &Request, needs_attachment: bool) -> Option<Response> { + let session_id = get_string(&request.params, "session_id").unwrap_or_default(); + if session_id.is_empty() { + return None; + } + if self.grant == RequestGrant::None || self.active_session_id.is_empty() { + return Some(rpc_error( + None, + "unauthorized", + "request requires an opened or attached terminal session", + )); + } + if session_id != self.active_session_id { + return Some(rpc_error( + None, + "unauthorized", + "request exceeds direct ticket session scope", + )); + } + if needs_attachment { + let attachment_id = get_string(&request.params, "attachment_id").unwrap_or_default(); + if attachment_id.is_empty() { + return None; + } + if attachment_id != self.active_attachment_id { + return Some(rpc_error( + None, + "unauthorized", + "request exceeds direct ticket attachment scope", + )); + } + } + None + } + + fn observe(&mut self, request: &Request, response: &Response) { + match request.method.as_str() { + "terminal.open" => { + if let Some((session_id, attachment_id)) = response_scope(response.result.as_ref()) + { + self.active_session_id = session_id; + self.active_attachment_id = attachment_id; + self.grant = RequestGrant::Open; + self.used = true; + } + } + "session.attach" => { + let session_id = get_string(&request.params, 
"session_id").unwrap_or_default(); + let attachment_id = + get_string(&request.params, "attachment_id").unwrap_or_default(); + if !session_id.is_empty() && !attachment_id.is_empty() { + self.active_session_id = session_id.to_string(); + self.active_attachment_id = attachment_id.to_string(); + self.grant = RequestGrant::Attach; + self.used = true; + } + } + "session.close" | "session.detach" => { + self.grant = RequestGrant::None; + self.active_session_id.clear(); + self.active_attachment_id.clear(); + } + _ => {} + } + } + + fn allowed_scope(&self) -> Option<(&str, &str)> { + if self.grant != RequestGrant::None + && !self.active_session_id.is_empty() + && !self.active_attachment_id.is_empty() + { + Some((&self.active_session_id, &self.active_attachment_id)) + } else if !self.claimed_session_id.is_empty() && !self.claimed_attachment_id.is_empty() { + Some((&self.claimed_session_id, &self.claimed_attachment_id)) + } else { + None + } + } +} + +fn with_id(mut response: Response, id: Option<Value>) -> Response { + response.id = id; + response +} + +fn response_scope(result: Option<&Value>) -> Option<(String, String)> { + let result = result?; + let session_id = result.get("session_id")?.as_str()?.to_string(); + let attachment_id = result.get("attachment_id")?.as_str()?.to_string(); + Some((session_id, attachment_id)) +} + +fn trim_crlf(frame: &[u8]) -> &[u8] { + let mut end = frame.len(); + while end > 0 && (frame[end - 1] == b'\n' || frame[end - 1] == b'\r') { + end -= 1; + } + &frame[..end] +} + +fn collect_panes(session: &Arc<Session>) -> Vec<Arc<PaneHandle>> { + let inner = session.inner.lock().unwrap(); + inner + .windows + .iter() + .flat_map(|window| window.panes.iter().map(|pane| Arc::clone(&pane.handle))) + .collect() +} + +fn collect_events( + state: &CoreState, + cursor: u64, + filters: &BTreeSet<String>, + session_id: Option<&str>, + pane_id: Option<&str>, +) -> Vec<Value> { + let start = cursor.max(state.event_base_cursor); + let offset = 
start.saturating_sub(state.event_base_cursor) as usize; + state + .events + .iter() + .skip(offset) + .filter(|event| { + let kind = event + .get("kind") + .and_then(Value::as_str) + .unwrap_or_default(); + let session_matches = session_id + .map(|value| event.get("session_id").and_then(Value::as_str) == Some(value)) + .unwrap_or(true); + let pane_matches = pane_id + .map(|value| event.get("pane_id").and_then(Value::as_str) == Some(value)) + .unwrap_or(true); + (filters.is_empty() || filters.contains(kind)) && session_matches && pane_matches + }) + .cloned() + .collect() +} + +fn snapshot_value( + snapshot: SessionSnapshot, + attachment_id: Option<String>, + offset: Option<u64>, +) -> Value { + let mut value = serde_json::to_value(snapshot).unwrap_or_else(|_| json!({})); + if let Some(object) = value.as_object_mut() { + if let Some(attachment_id) = attachment_id { + object.insert("attachment_id".to_string(), json!(attachment_id)); + } + if let Some(offset) = offset { + object.insert("offset".to_string(), json!(offset)); + } + } + value +} + +fn get_string<'a>(params: &'a Value, key: &str) -> Option<&'a str> { + params.get(key).and_then(Value::as_str) +} + +fn get_bool(params: &Value, key: &str) -> Option<bool> { + params.get(key).and_then(Value::as_bool) +} + +fn get_non_negative_i64(params: &Value, key: &str) -> Option<i64> { + match params.get(key) { + Some(Value::Number(value)) => value.as_i64().filter(|value| *value >= 0), + Some(Value::String(value)) => value.parse::<i64>().ok().filter(|value| *value >= 0), + _ => None, + } +} + +fn get_non_negative_u64(params: &Value, key: &str) -> Option<u64> { + get_non_negative_i64(params, key).map(|value| value as u64) +} + +fn get_positive_u16(params: &Value, key: &str) -> Option<u16> { + get_non_negative_i64(params, key) + .filter(|value| *value > 0 && *value <= u16::MAX as i64) + .map(|value| value as u16) +} + +fn get_positive_usize(params: &Value, key: &str) -> Option<usize> { + get_non_negative_i64(params, key) + 
.filter(|value| *value > 0) + .map(|value| value as usize) +} + +fn get_filters(params: &Value) -> BTreeSet<String> { + let filter_value = params.get("filters").or_else(|| params.get("filter")); + match filter_value { + Some(Value::String(value)) => value + .split(',') + .map(str::trim) + .filter(|value| !value.is_empty()) + .map(ToString::to_string) + .collect(), + Some(Value::Array(values)) => values + .iter() + .filter_map(Value::as_str) + .map(ToString::to_string) + .collect(), + _ => BTreeSet::new(), + } +} + +fn join_history(history: &str, visible: &str) -> String { + match (history.is_empty(), visible.is_empty()) { + (true, true) => String::new(), + (false, true) => history.to_string(), + (true, false) => visible.to_string(), + (false, false) => format!("{history}\n{visible}"), + } +} + +fn rebase_index(index: usize, removed: usize, len_after_remove: usize) -> usize { + if len_after_remove == 0 { + return 0; + } + if index > removed { + index - 1 + } else if index >= len_after_remove { + len_after_remove - 1 + } else { + index + } +} + +fn rebase_optional_index( + index: Option<usize>, + removed: usize, + len_after_remove: usize, +) -> Option<usize> { + let index = index?; + if len_after_remove == 0 || index == removed { + return None; + } + Some(rebase_index(index, removed, len_after_remove)) +} + +fn allocate_generated_session_id(state: &mut CoreState) -> String { + loop { + let value = format!("sess-{}", state.next_session_id); + state.next_session_id += 1; + if !state.sessions.contains_key(&value) { + return value; + } + } +} + +fn unix_now() -> u64 { + SystemTime::now() + .duration_since(UNIX_EPOCH) + .map(|value| value.as_millis() as u64) + .unwrap_or_default() +} + +fn unix_now_secs() -> i64 { + SystemTime::now() + .duration_since(UNIX_EPOCH) + .map(|value| value.as_secs() as i64) + .unwrap_or_default() +} + +fn load_certs(path: &str) -> Result<Vec<CertificateDer<'static>>, String> { + let data = fs::read(path).map_err(|err| err.to_string())?; + let 
mut reader = BufReader::new(data.as_slice()); + rustls_pemfile::certs(&mut reader) + .collect::<Result<Vec<_>, _>>() + .map_err(|err| err.to_string()) +} + +fn load_key(path: &str) -> Result<PrivateKeyDer<'static>, String> { + let data = fs::read(path).map_err(|err| err.to_string())?; + let mut reader = BufReader::new(data.as_slice()); + rustls_pemfile::private_key(&mut reader) + .map_err(|err| err.to_string())? + .ok_or_else(|| "missing private key".to_string()) +} + +const MAX_HTTP_REQUEST_BYTES: usize = 8 * 1024; +const WEBSOCKET_MAGIC: &str = "258EAFA5-E914-47DA-95CA-C5AB0DC85B11"; + +impl Daemon { + fn serve_websocket_stream<S: Read + Write>( + &self, + stream: S, + secret: &str, + ) -> Result<(), String> { + let mut reader = BufReader::new(stream); + let request = read_http_request(&mut reader)?; + if !is_websocket_upgrade(&request) { + return Err("missing websocket upgrade".to_string()); + } + let ws_key = header_value(&request, "sec-websocket-key") + .ok_or_else(|| "missing websocket key".to_string())?; + write_websocket_upgrade(reader.get_mut(), ws_key)?; + + let Some(auth_message) = read_ws_text_message(&mut reader)? else { + return Ok(()); + }; + if !websocket_secret_matches(&auth_message, secret) { + write_ws_json_value( + reader.get_mut(), + &json!({ + "ok": false, + "error": { + "code": "unauthorized", + "message": "invalid secret", + } + }), + )?; + return Ok(()); + } + write_ws_json_value( + reader.get_mut(), + &json!({ + "ok": true, + "result": { + "authenticated": true, + } + }), + )?; + + loop { + let Some(message) = read_ws_text_message(&mut reader)? 
else { + return Ok(()); + }; + if message.trim().is_empty() { + continue; + } + let response = self.parse_and_dispatch(message.as_bytes(), None); + write_ws_response(reader.get_mut(), &response)?; + } + } +} + +fn read_http_request<S: Read>(reader: &mut BufReader<S>) -> Result<String, String> { + let mut request = String::new(); + loop { + let mut line = String::new(); + let read = reader.read_line(&mut line).map_err(|err| err.to_string())?; + if read == 0 { + return Err("connection closed".to_string()); + } + if request.len() + line.len() > MAX_HTTP_REQUEST_BYTES { + return Err("websocket HTTP request too large".to_string()); + } + request.push_str(&line); + if line == "\r\n" || line == "\n" { + return Ok(request); + } + } +} + +fn is_websocket_upgrade(request: &str) -> bool { + header_value(request, "upgrade") + .map(|value| value.eq_ignore_ascii_case("websocket")) + .unwrap_or(false) +} + +fn header_value<'a>(request: &'a str, name: &str) -> Option<&'a str> { + request + .split("\r\n") + .filter_map(|line| line.split_once(':')) + .find_map(|(key, value)| { + key.trim() + .eq_ignore_ascii_case(name) + .then_some(value.trim()) + }) +} + +fn write_websocket_upgrade<S: Write>(stream: &mut S, ws_key: &str) -> Result<(), String> { + let mut hasher = Sha1::new(); + hasher.update(ws_key.as_bytes()); + hasher.update(WEBSOCKET_MAGIC.as_bytes()); + let accept = base64::engine::general_purpose::STANDARD.encode(hasher.finalize()); + stream + .write_all( + format!( + "HTTP/1.1 101 Switching Protocols\r\nUpgrade: websocket\r\nConnection: Upgrade\r\nSec-WebSocket-Accept: {accept}\r\n\r\n" + ) + .as_bytes(), + ) + .map_err(|err| err.to_string())?; + stream.flush().map_err(|err| err.to_string()) +} + +fn websocket_secret_matches(payload: &str, secret: &str) -> bool { + serde_json::from_str::<Value>(payload) + .ok() + .and_then(|value| { + value + .get("secret") + .and_then(Value::as_str) + .map(|candidate| candidate == secret) + }) + .unwrap_or(false) +} + +fn 
read_ws_text_message<S: Read + Write>( + reader: &mut BufReader<S>, +) -> Result<Option<String>, String> { + loop { + let mut header = [0u8; 2]; + match reader.read_exact(&mut header) { + Ok(()) => {} + Err(err) if err.kind() == std::io::ErrorKind::UnexpectedEof => return Ok(None), + Err(err) => return Err(err.to_string()), + } + + let opcode = header[0] & 0x0F; + let masked = (header[1] & 0x80) != 0; + let payload = read_ws_payload(reader, header[1] & 0x7F, masked)?; + + match opcode { + 0x08 => return Ok(None), + 0x09 => { + write_ws_frame(reader.get_mut(), 0x0A, &payload)?; + } + 0x01 => { + let text = String::from_utf8(payload).map_err(|err| err.to_string())?; + return Ok(Some(text)); + } + _ => {} + } + } +} + +fn read_ws_payload<S: Read + Write>( + reader: &mut BufReader<S>, + len_byte: u8, + masked: bool, +) -> Result<Vec<u8>, String> { + let mut payload_len = len_byte as u64; + if len_byte == 126 { + let mut extended = [0u8; 2]; + reader + .read_exact(&mut extended) + .map_err(|err| err.to_string())?; + payload_len = u16::from_be_bytes(extended) as u64; + } else if len_byte == 127 { + let mut extended = [0u8; 8]; + reader + .read_exact(&mut extended) + .map_err(|err| err.to_string())?; + payload_len = u64::from_be_bytes(extended); + } + if payload_len > crate::rpc::MAX_FRAME_BYTES as u64 { + return Err("websocket frame exceeds maximum size".to_string()); + } + + let mut mask_key = [0u8; 4]; + if masked { + reader + .read_exact(&mut mask_key) + .map_err(|err| err.to_string())?; + } + + let mut payload = vec![0u8; payload_len as usize]; + if !payload.is_empty() { + reader + .read_exact(&mut payload) + .map_err(|err| err.to_string())?; + } + if masked { + for (index, byte) in payload.iter_mut().enumerate() { + *byte ^= mask_key[index % mask_key.len()]; + } + } + + Ok(payload) +} + +fn write_ws_json_value<S: Write>(stream: &mut S, value: &Value) -> Result<(), String> { + let payload = serde_json::to_vec(value).map_err(|err| err.to_string())?; + 
write_ws_frame(stream, 0x01, &payload) +} + +fn write_ws_response<S: Write>(stream: &mut S, response: &Response) -> Result<(), String> { + let payload = serde_json::to_vec(response).map_err(|err| err.to_string())?; + write_ws_frame(stream, 0x01, &payload) +} + +fn write_ws_frame<S: Write>(stream: &mut S, opcode: u8, data: &[u8]) -> Result<(), String> { + let mut header = [0u8; 10]; + header[0] = 0x80 | opcode; + let header_len = if data.len() <= 125 { + header[1] = data.len() as u8; + 2 + } else if data.len() <= u16::MAX as usize { + header[1] = 126; + header[2..4].copy_from_slice(&(data.len() as u16).to_be_bytes()); + 4 + } else { + header[1] = 127; + header[2..10].copy_from_slice(&(data.len() as u64).to_be_bytes()); + 10 + }; + stream + .write_all(&header[..header_len]) + .map_err(|err| err.to_string())?; + if !data.is_empty() { + stream.write_all(data).map_err(|err| err.to_string())?; + } + stream.flush().map_err(|err| err.to_string()) +} + +#[cfg(test)] +mod tests { + use super::*; + use serde_json::json; + use std::{ + io::{BufRead, BufReader, Read, Write}, + net::Shutdown, + os::unix::net::UnixStream, + thread, + time::Duration, + }; + + fn write_masked_ws_text_frame(stream: &mut UnixStream, text: &str) { + let payload = text.as_bytes(); + let mask = [0x12, 0x34, 0x56, 0x78]; + let mut header = Vec::with_capacity(14); + header.push(0x81); + if payload.len() <= 125 { + header.push(0x80 | payload.len() as u8); + } else if payload.len() <= u16::MAX as usize { + header.push(0x80 | 126); + header.extend_from_slice(&(payload.len() as u16).to_be_bytes()); + } else { + header.push(0x80 | 127); + header.extend_from_slice(&(payload.len() as u64).to_be_bytes()); + } + header.extend_from_slice(&mask); + stream.write_all(&header).unwrap(); + + let mut masked = payload.to_vec(); + for (index, byte) in masked.iter_mut().enumerate() { + *byte ^= mask[index % mask.len()]; + } + stream.write_all(&masked).unwrap(); + stream.flush().unwrap(); + } + + fn 
read_ws_text_frame(reader: &mut BufReader<UnixStream>) -> String { + let mut header = [0u8; 2]; + reader.read_exact(&mut header).unwrap(); + let mut payload_len = (header[1] & 0x7F) as usize; + if payload_len == 126 { + let mut extended = [0u8; 2]; + reader.read_exact(&mut extended).unwrap(); + payload_len = u16::from_be_bytes(extended) as usize; + } else if payload_len == 127 { + let mut extended = [0u8; 8]; + reader.read_exact(&mut extended).unwrap(); + payload_len = u64::from_be_bytes(extended) as usize; + } + let mut payload = vec![0u8; payload_len]; + if payload_len > 0 { + reader.read_exact(&mut payload).unwrap(); + } + String::from_utf8(payload).unwrap() + } + + #[test] + fn websocket_stream_authenticates_and_dispatches_requests() { + let daemon = Daemon::new("test"); + let (client, server) = UnixStream::pair().unwrap(); + let server_thread = thread::spawn(move || { + daemon.serve_websocket_stream(server, "secret").unwrap(); + }); + + let mut client = client; + client + .write_all( + b"GET / HTTP/1.1\r\nHost: localhost\r\nUpgrade: websocket\r\nConnection: Upgrade\r\nSec-WebSocket-Key: SGVsbG9Xb3JsZA==\r\nSec-WebSocket-Version: 13\r\n\r\n", + ) + .unwrap(); + client.flush().unwrap(); + + let mut reader = BufReader::new(client.try_clone().unwrap()); + let mut status_line = String::new(); + reader.read_line(&mut status_line).unwrap(); + assert!(status_line.starts_with("HTTP/1.1 101")); + loop { + let mut line = String::new(); + reader.read_line(&mut line).unwrap(); + if line == "\r\n" || line.is_empty() { + break; + } + } + + write_masked_ws_text_frame(&mut client, r#"{"secret":"secret"}"#); + let auth_response: Value = serde_json::from_str(&read_ws_text_frame(&mut reader)).unwrap(); + assert_eq!(auth_response["ok"].as_bool(), Some(true)); + assert_eq!( + auth_response["result"]["authenticated"].as_bool(), + Some(true) + ); + + write_masked_ws_text_frame(&mut client, r#"{"id":1,"method":"hello","params":{}}"#); + let response: Value = 
serde_json::from_str(&read_ws_text_frame(&mut reader)).unwrap(); + assert_eq!(response["ok"].as_bool(), Some(true)); + assert_eq!(response["id"].as_i64(), Some(1)); + assert_eq!(response["result"]["name"].as_str(), Some("cmuxd-remote")); + + client.shutdown(Shutdown::Both).unwrap(); + server_thread.join().unwrap(); + } + + use crate::auth::{TicketClaims, sign}; + + fn tmux_exec(daemon: &Daemon, argv: &[&str]) -> Value { + daemon + .dispatch_json("tmux.exec", json!({ "argv": argv })) + .unwrap() + } + + fn wait_ready(daemon: &Daemon, session_id: &str) { + daemon + .dispatch_json( + "amux.wait", + json!({ + "kind": "ready", + "session_id": session_id, + "timeout_ms": 5_000, + }), + ) + .unwrap(); + } + + fn event_kinds(result: &Value) -> Vec<&str> { + result["events"] + .as_array() + .unwrap() + .iter() + .map(|event| event["kind"].as_str().unwrap()) + .collect() + } + + fn strip_display_id<'a>(value: &'a str, prefix: char) -> &'a str { + value.trim_start_matches(prefix) + } + + fn encode_ticket(claims: TicketClaims, secret: &[u8]) -> String { + let payload = serde_json::to_vec(&claims).unwrap(); + let encoded_payload = base64::engine::general_purpose::URL_SAFE_NO_PAD.encode(payload); + let encoded_signature = base64::engine::general_purpose::URL_SAFE_NO_PAD + .encode(sign(encoded_payload.as_bytes(), secret)); + format!("{encoded_payload}.{encoded_signature}") + } + + #[test] + fn amux_events_read_accepts_filters_plural_and_session_close_emits_close_events() { + let daemon = Daemon::new("test"); + let opened = tmux_exec(&daemon, &["new-session", "-s", "close-demo", "/bin/cat"]); + let session_id = opened["session_id"].as_str().unwrap(); + wait_ready(&daemon, session_id); + + let cursor = daemon.current_event_cursor(); + daemon + .dispatch_json("session.close", json!({ "session_id": session_id })) + .unwrap(); + + let events = daemon + .dispatch_json( + "amux.events.read", + json!({ + "cursor": cursor, + "timeout_ms": 0, + "filters": ["pane.close", "window.close", 
"session.close"], + }), + ) + .unwrap(); + + assert_eq!( + event_kinds(&events), + vec!["pane.close", "window.close", "session.close"] + ); + } + + #[test] + fn tmux_targets_accept_dollar_prefixed_session_window_targets() { + let daemon = Daemon::new("test"); + let opened = tmux_exec(&daemon, &["new-session", "-s", "target-demo", "/bin/cat"]); + let session_id = opened["session_id"].as_str().unwrap().to_string(); + wait_ready(&daemon, &session_id); + + let list = tmux_exec( + &daemon, + &[ + "list-panes", + "-t", + "$target-demo:0", + "-F", + "#{session_name}:#{window_index}.#{pane_index}", + ], + ); + assert_eq!(list["stdout"].as_str().unwrap().trim(), "target-demo:0.0"); + + let display = tmux_exec( + &daemon, + &[ + "display-message", + "-t", + "$target-demo:0", + "#{session_name}:#{window_index}.#{pane_index}", + ], + ); + assert_eq!( + display["stdout"].as_str().unwrap().trim(), + "target-demo:0.0" + ); + + daemon + .dispatch_json("session.close", json!({ "session_id": session_id })) + .unwrap(); + } + + #[test] + fn tmux_kill_commands_emit_close_events() { + let daemon = Daemon::new("test"); + let opened = tmux_exec(&daemon, &["new-session", "-s", "kill-demo", "/bin/cat"]); + let session_id = opened["session_id"].as_str().unwrap().to_string(); + wait_ready(&daemon, &session_id); + + let split = tmux_exec(&daemon, &["split-window", "-t", "kill-demo:0", "/bin/cat"]); + let split_pane_id = strip_display_id(split["pane_id"].as_str().unwrap(), '%').to_string(); + let cursor = daemon.current_event_cursor(); + tmux_exec(&daemon, &["kill-pane", "-t", "$kill-demo:0.1"]); + + let pane_events = daemon + .dispatch_json( + "amux.events.read", + json!({ + "cursor": cursor, + "timeout_ms": 0, + "filters": ["pane.close"], + "session_id": session_id, + }), + ) + .unwrap(); + let pane_events = pane_events["events"].as_array().unwrap(); + assert_eq!(pane_events.len(), 1); + assert_eq!(pane_events[0]["kind"].as_str().unwrap(), "pane.close"); + 
assert_eq!(pane_events[0]["pane_id"].as_str().unwrap(), split_pane_id); + + let new_window = tmux_exec(&daemon, &["new-window", "-t", "kill-demo", "/bin/cat"]); + let window_id = + strip_display_id(new_window["window_id"].as_str().unwrap(), '@').to_string(); + let window_pane_id = + strip_display_id(new_window["pane_id"].as_str().unwrap(), '%').to_string(); + let cursor = daemon.current_event_cursor(); + tmux_exec(&daemon, &["kill-window", "-t", "$kill-demo:1"]); + + let window_events = daemon + .dispatch_json( + "amux.events.read", + json!({ + "cursor": cursor, + "timeout_ms": 0, + "filters": ["pane.close", "window.close", "session.close"], + "session_id": session_id, + }), + ) + .unwrap(); + let window_events = window_events["events"].as_array().unwrap(); + assert_eq!(window_events.len(), 2); + assert_eq!(window_events[0]["kind"].as_str().unwrap(), "pane.close"); + assert_eq!( + window_events[0]["pane_id"].as_str().unwrap(), + window_pane_id + ); + assert_eq!(window_events[1]["kind"].as_str().unwrap(), "window.close"); + assert_eq!(window_events[1]["window_id"].as_str().unwrap(), window_id); + + daemon + .dispatch_json("session.close", json!({ "session_id": session_id })) + .unwrap(); + } + + #[test] + fn amux_wait_signal_tracks_tmux_wait_for_generations() { + let daemon = Daemon::new("test"); + + tmux_exec(&daemon, &["wait-for", "-S", "spec-signal"]); + let first = daemon + .dispatch_json( + "amux.wait", + json!({ + "kind": "signal", + "name": "spec-signal", + "after_generation": 0, + "timeout_ms": 0, + }), + ) + .unwrap(); + assert_eq!(first["name"].as_str().unwrap(), "spec-signal"); + assert_eq!(first["generation"].as_u64().unwrap(), 1); + + let signaler = daemon.clone(); + let signal_thread = thread::spawn(move || { + tmux_exec(&signaler, &["wait-for", "-S", "spec-signal"]); + }); + signal_thread.join().unwrap(); + + let second = daemon + .dispatch_json( + "amux.wait", + json!({ + "kind": "signal", + "name": "spec-signal", + "after_generation": 1, + 
"timeout_ms": 5_000, + }), + ) + .unwrap(); + assert_eq!(second["name"].as_str().unwrap(), "spec-signal"); + assert_eq!(second["generation"].as_u64().unwrap(), 2); + } + + #[test] + fn tmux_required_format_variables_render_without_placeholders() { + let daemon = Daemon::new("test"); + let opened = tmux_exec( + &daemon, + &[ + "new-session", + "-s", + "fmt-demo", + "-n", + "fmt-window", + "/bin/cat", + ], + ); + let session_id = opened["session_id"].as_str().unwrap().to_string(); + wait_ready(&daemon, &session_id); + + let rendered = tmux_exec( + &daemon, + &[ + "display-message", + "-t", + "fmt-demo:0.0", + "#{session_name}|#{session_id}|#{window_id}|#{window_name}|#{window_index}|#{window_active}|#{pane_id}|#{pane_index}|#{pane_active}|#{pane_title}|#{pane_current_path}|#{pane_current_command}", + ], + )["stdout"] + .as_str() + .unwrap() + .trim() + .to_string(); + + assert!(!rendered.contains("#{")); + let parts: Vec<&str> = rendered.split('|').collect(); + assert_eq!(parts.len(), 12); + assert_eq!(parts[0], "fmt-demo"); + assert_eq!(parts[1], "$fmt-demo"); + assert_eq!(parts[2], opened["window_id"].as_str().unwrap()); + assert_eq!(parts[3], "fmt-window"); + assert_eq!(parts[4], "0"); + assert_eq!(parts[5], "1"); + assert_eq!(parts[6], opened["pane_id"].as_str().unwrap()); + assert_eq!(parts[7], "0"); + assert_eq!(parts[8], "1"); + assert_eq!(parts[11], "cat"); + + daemon + .dispatch_json("session.close", json!({ "session_id": session_id })) + .unwrap(); + } + + #[test] + fn exited_event_is_emitted_once_per_pane_exit() { + let daemon = Daemon::new("test"); + let opened = tmux_exec( + &daemon, + &["new-session", "-s", "exit-demo", "/bin/echo", "done"], + ); + let session_id = opened["session_id"].as_str().unwrap().to_string(); + let pane_id = strip_display_id(opened["pane_id"].as_str().unwrap(), '%').to_string(); + + daemon + .dispatch_json( + "amux.wait", + json!({ + "kind": "exited", + "pane_id": pane_id, + "timeout_ms": 5_000, + }), + ) + .unwrap(); + 
thread::sleep(Duration::from_millis(100)); + + let exited_events = daemon + .dispatch_json( + "amux.events.read", + json!({ + "cursor": 0, + "timeout_ms": 0, + "filters": ["exited"], + "session_id": session_id, + }), + ) + .unwrap(); + assert_eq!(event_kinds(&exited_events), vec!["exited"]); + + let cursor = exited_events["cursor"].as_u64().unwrap(); + daemon + .dispatch_json( + "amux.capture", + json!({ + "pane_id": strip_display_id(opened["pane_id"].as_str().unwrap(), '%'), + "history": true, + }), + ) + .unwrap(); + thread::sleep(Duration::from_millis(50)); + + let later_events = daemon + .dispatch_json( + "amux.events.read", + json!({ + "cursor": cursor, + "timeout_ms": 0, + "filters": ["exited"], + }), + ) + .unwrap(); + assert!(later_events["events"].as_array().unwrap().is_empty()); + + daemon + .dispatch_json("session.close", json!({ "session_id": "exit-demo" })) + .unwrap(); + } + + #[test] + fn generated_session_ids_skip_existing_custom_ids() { + let daemon = Daemon::new("test"); + let custom = daemon + .dispatch_json( + "terminal.open", + json!({ + "session_id": "sess-1", + "command": "/bin/cat", + "cols": 80, + "rows": 24, + }), + ) + .unwrap(); + let generated = daemon + .dispatch_json( + "terminal.open", + json!({ + "command": "/bin/cat", + "cols": 80, + "rows": 24, + }), + ) + .unwrap(); + + assert_eq!(custom["session_id"].as_str().unwrap(), "sess-1"); + assert_eq!(generated["session_id"].as_str().unwrap(), "sess-2"); + + daemon + .dispatch_json("session.close", json!({ "session_id": "sess-1" })) + .unwrap(); + daemon + .dispatch_json("session.close", json!({ "session_id": "sess-2" })) + .unwrap(); + } + + #[test] + fn bare_tmux_pane_targets_do_not_escape_the_active_window() { + let daemon = Daemon::new("test"); + tmux_exec(&daemon, &["new-session", "-s", "alpha", "/bin/cat"]); + tmux_exec(&daemon, &["new-session", "-s", "beta", "/bin/cat"]); + tmux_exec(&daemon, &["split-window", "-t", "beta:0", "/bin/cat"]); + + let err = match 
daemon.tmux_resolve_pane(Some("1")) { + Ok(_) => panic!("bare pane index should stay scoped to the active window"), + Err(err) => err, + }; + assert_eq!(err, "pane not found: 1"); + + daemon + .dispatch_json("session.close", json!({ "session_id": "alpha" })) + .unwrap(); + daemon + .dispatch_json("session.close", json!({ "session_id": "beta" })) + .unwrap(); + } + + #[test] + fn direct_tls_keeps_buffered_frames_after_handshake() { + let daemon = Daemon::new("test"); + let (client, server) = UnixStream::pair().unwrap(); + let server_thread = thread::spawn(move || { + daemon.serve_tls_stream(server, "srv", b"secret").unwrap(); + }); + + let claims = TicketClaims { + server_id: "srv".to_string(), + team_id: String::new(), + session_id: String::new(), + attachment_id: String::new(), + capabilities: vec!["session.open".to_string()], + exp: unix_now_secs() + 60, + nonce: "buffered-handshake".to_string(), + }; + let ticket = encode_ticket(claims, b"secret"); + let handshake = serde_json::to_vec(&json!({ "ticket": ticket })).unwrap(); + let request = serde_json::to_vec(&json!({ + "id": 1, + "method": "hello", + "params": {} + })) + .unwrap(); + + let mut client = client; + client.write_all(&handshake).unwrap(); + client.write_all(b"\n").unwrap(); + client.write_all(&request).unwrap(); + client.write_all(b"\n").unwrap(); + client.flush().unwrap(); + client.shutdown(Shutdown::Write).unwrap(); + + let mut reader = BufReader::new(client); + let mut line = String::new(); + reader.read_line(&mut line).unwrap(); + let handshake_response: Value = serde_json::from_str(&line).unwrap(); + assert_eq!(handshake_response["ok"].as_bool(), Some(true)); + + line.clear(); + reader.read_line(&mut line).unwrap(); + let request_response: Value = serde_json::from_str(&line).unwrap(); + assert_eq!(request_response["ok"].as_bool(), Some(true)); + assert_eq!( + request_response["result"]["name"].as_str(), + Some("cmuxd-remote") + ); + + server_thread.join().unwrap(); + } +} diff --git 
a/daemon/remote/rust/src/session.rs b/daemon/remote/rust/src/session.rs new file mode 100644 index 000000000..fd08a9953 --- /dev/null +++ b/daemon/remote/rust/src/session.rs @@ -0,0 +1,258 @@ +use std::collections::BTreeMap; +use std::sync::{Arc, Mutex}; +use std::time::{SystemTime, UNIX_EPOCH}; + +use crate::pane::PaneHandle; + +#[derive(Debug, Clone, serde::Serialize)] +pub struct AttachmentSnapshot { + pub attachment_id: String, + pub cols: u16, + pub rows: u16, + pub updated_at: Option<String>, +} + +#[derive(Debug, Clone, serde::Serialize)] +pub struct SessionSnapshot { + pub session_id: String, + pub attachments: Vec<AttachmentSnapshot>, + pub effective_cols: u16, + pub effective_rows: u16, + pub last_known_cols: u16, + pub last_known_rows: u16, +} + +#[derive(Debug, Clone, serde::Serialize)] +pub struct SessionListEntry { + pub session_id: String, + pub attachment_count: usize, + pub effective_cols: u16, + pub effective_rows: u16, +} + +#[derive(Debug, Clone)] +pub struct AttachmentState { + pub cols: u16, + pub rows: u16, + pub updated_at_ms: u64, +} + +#[derive(Debug)] +pub struct SessionMeta { + pub attachments: BTreeMap<String, AttachmentState>, + pub effective_cols: u16, + pub effective_rows: u16, + pub last_known_cols: u16, + pub last_known_rows: u16, +} + +#[derive(Debug)] +pub struct Window { + pub id: String, + pub name: String, + pub panes: Vec<PaneSlot>, + pub active_pane: usize, + pub last_pane: Option<usize>, +} + +#[derive(Debug)] +pub struct PaneSlot { + pub pane_id: String, + pub command: String, + pub handle: Arc<PaneHandle>, +} + +#[derive(Debug)] +pub struct SessionInner { + pub windows: Vec<Window>, + pub active_window: usize, + pub last_window: Option<usize>, +} + +#[derive(Debug)] +pub struct Session { + pub id: String, + pub meta: Mutex<SessionMeta>, + pub inner: Mutex<SessionInner>, +} + +#[derive(Debug)] +pub enum SessionError { + NotFound, + AttachmentNotFound, + InvalidSize, +} + +impl Session { + pub fn new(id: String) -> Self { + 
Self { + id, + meta: Mutex::new(SessionMeta { + attachments: BTreeMap::new(), + effective_cols: 0, + effective_rows: 0, + last_known_cols: 0, + last_known_rows: 0, + }), + inner: Mutex::new(SessionInner { + windows: Vec::new(), + active_window: 0, + last_window: None, + }), + } + } + + pub fn attach(&self, attachment_id: String, cols: u16, rows: u16) -> Result<(), SessionError> { + let (cols, rows) = normalize_size(cols, rows); + if cols == 0 || rows == 0 { + return Err(SessionError::InvalidSize); + } + let mut meta = self.meta.lock().unwrap(); + meta.attachments.insert( + attachment_id, + AttachmentState { + cols, + rows, + updated_at_ms: now_ms(), + }, + ); + recompute(&mut meta); + Ok(()) + } + + pub fn resize_attachment( + &self, + attachment_id: &str, + cols: u16, + rows: u16, + ) -> Result<(), SessionError> { + let (cols, rows) = normalize_size(cols, rows); + if cols == 0 || rows == 0 { + return Err(SessionError::InvalidSize); + } + let mut meta = self.meta.lock().unwrap(); + let attachment = meta + .attachments + .get_mut(attachment_id) + .ok_or(SessionError::AttachmentNotFound)?; + attachment.cols = cols; + attachment.rows = rows; + attachment.updated_at_ms = now_ms(); + recompute(&mut meta); + Ok(()) + } + + pub fn detach(&self, attachment_id: &str) -> Result<(), SessionError> { + let mut meta = self.meta.lock().unwrap(); + if meta.attachments.remove(attachment_id).is_none() { + return Err(SessionError::AttachmentNotFound); + } + recompute(&mut meta); + Ok(()) + } + + pub fn snapshot(&self) -> SessionSnapshot { + let meta = self.meta.lock().unwrap(); + SessionSnapshot { + session_id: self.id.clone(), + attachments: meta + .attachments + .iter() + .map(|(attachment_id, attachment)| AttachmentSnapshot { + attachment_id: attachment_id.clone(), + cols: attachment.cols, + rows: attachment.rows, + updated_at: Some(format_iso8601(attachment.updated_at_ms)), + }) + .collect(), + effective_cols: meta.effective_cols, + effective_rows: meta.effective_rows, + 
last_known_cols: meta.last_known_cols, + last_known_rows: meta.last_known_rows, + } + } + + pub fn list_entry(&self) -> SessionListEntry { + let meta = self.meta.lock().unwrap(); + SessionListEntry { + session_id: self.id.clone(), + attachment_count: meta.attachments.len(), + effective_cols: meta.effective_cols, + effective_rows: meta.effective_rows, + } + } + + pub fn effective_size(&self) -> (u16, u16) { + let meta = self.meta.lock().unwrap(); + (meta.effective_cols, meta.effective_rows) + } +} + +pub fn normalize_size(cols: u16, rows: u16) -> (u16, u16) { + let normalized_cols = if cols == 0 { 0 } else { cols.max(2) }; + let normalized_rows = if rows == 0 { 0 } else { rows.max(1) }; + (normalized_cols, normalized_rows) +} + +fn recompute(meta: &mut SessionMeta) { + if meta.attachments.is_empty() { + meta.effective_cols = meta.last_known_cols; + meta.effective_rows = meta.last_known_rows; + return; + } + + let mut min_cols = 0; + let mut min_rows = 0; + for attachment in meta.attachments.values() { + if min_cols == 0 || attachment.cols < min_cols { + min_cols = attachment.cols; + } + if min_rows == 0 || attachment.rows < min_rows { + min_rows = attachment.rows; + } + } + meta.effective_cols = min_cols; + meta.effective_rows = min_rows; + meta.last_known_cols = min_cols; + meta.last_known_rows = min_rows; +} + +fn now_ms() -> u64 { + SystemTime::now() + .duration_since(UNIX_EPOCH) + .map(|value| value.as_millis() as u64) + .unwrap_or_default() +} + +fn format_iso8601(timestamp_ms: u64) -> String { + let secs = (timestamp_ms / 1000) as libc::time_t; + let millis = timestamp_ms % 1000; + let mut tm = unsafe { std::mem::zeroed::<libc::tm>() }; + let tm_ptr = unsafe { libc::gmtime_r(&secs, &mut tm) }; + if tm_ptr.is_null() { + return format!("{}", timestamp_ms / 1000); + } + format!( + "{:04}-{:02}-{:02}T{:02}:{:02}:{:02}.{:03}Z", + tm.tm_year + 1900, + tm.tm_mon + 1, + tm.tm_mday, + tm.tm_hour, + tm.tm_min, + tm.tm_sec, + millis + ) +} + +#[cfg(test)] +mod tests { + 
use super::format_iso8601; + + #[test] + fn format_iso8601_emits_rfc3339_timestamp() { + assert_eq!( + format_iso8601(1_704_067_445_678), + "2024-01-01T00:04:05.678Z" + ); + } +} diff --git a/daemon/remote/rust/src/tmux.rs b/daemon/remote/rust/src/tmux.rs new file mode 100644 index 000000000..d8290e243 --- /dev/null +++ b/daemon/remote/rust/src/tmux.rs @@ -0,0 +1 @@ +// Intentionally small for the first Rust cut. diff --git a/daemon/remote/zig/build.zig b/daemon/remote/zig/build.zig index c57868728..325add84d 100644 --- a/daemon/remote/zig/build.zig +++ b/daemon/remote/zig/build.zig @@ -1,4 +1,5 @@ const std = @import("std"); +const builtin = @import("builtin"); pub fn build(b: *std.Build) void { const target = b.standardTargetOptions(.{}); @@ -15,9 +16,13 @@ pub fn build(b: *std.Build) void { }); mod.addOptions("build_options", build_options); + const disable_ghostty_simd = + !builtin.os.tag.isDarwin() and target.result.os.tag.isDarwin(); + if (b.lazyDependency("ghostty", .{ .target = target, .optimize = optimize, + .simd = !disable_ghostty_simd, })) |dep| { mod.addImport("ghostty-vt", dep.module("ghostty-vt")); } diff --git a/daemon/remote/zig/src/local_peer_auth.zig b/daemon/remote/zig/src/local_peer_auth.zig index cca782a23..b3eb898eb 100644 --- a/daemon/remote/zig/src/local_peer_auth.zig +++ b/daemon/remote/zig/src/local_peer_auth.zig @@ -2,6 +2,7 @@ const std = @import("std"); const builtin = @import("builtin"); const c = @cImport({ + @cDefine("_GNU_SOURCE", "1"); @cInclude("sys/types.h"); @cInclude("sys/socket.h"); @cInclude("unistd.h"); diff --git a/docs/amux-rust-backend-spec.md b/docs/amux-rust-backend-spec.md new file mode 100644 index 000000000..5c8a1fe08 --- /dev/null +++ b/docs/amux-rust-backend-spec.md @@ -0,0 +1,222 @@ +# AMux Rust Backend Spec + +Last updated: April 5, 2026 +Base branch: `task-move-ios-app-into-cmux-repo` + +## Goal + +Replace the current backend in this branch with a Rust daemon at `daemon/remote/rust`. 
+ +The Rust daemon must: +- preserve the current cmux JSON-RPC surface used by the app +- add `amux`-style capture, event, and wait primitives +- add a practical tmux compatibility layer for the approved common command subset +- build against the worktree Ghostty source via `GHOSTTY_SOURCE_DIR` + +## Inputs Used For The Rewrite + +- Current backend and transport code in `task-move-ios-app-into-cmux-repo` +- Existing tmux compatibility behavior in [`CLI/cmux.swift`](../CLI/cmux.swift) +- `weill-labs/amux` for the capture/events/wait model +- `libghostty-rs` as a design reference only + +`libghostty-rs` was reviewed, but v1 keeps the daemon on a direct Ghostty shim built from `GHOSTTY_SOURCE_DIR` instead of switching the runtime to that wrapper. + +## Explicit Non-Goals + +- tmux control mode +- full tmux parity +- every tmux option, format variable, hook, or command +- exact tmux layout semantics + +## Required Build Contract + +- `daemon/remote/rust/build.rs` must fail clearly if `GHOSTTY_SOURCE_DIR` is missing or wrong +- the Ghostty shim must be built against the same macOS deployment target as Cargo +- the daemon must stay runnable in local debug builds + +## JSON-RPC Surface + +### Existing cmux RPC that must stay + +- `hello` +- `ping` +- `proxy.open` +- `proxy.close` +- `proxy.write` +- `proxy.read` +- `session.open` +- `session.close` +- `session.attach` +- `session.resize` +- `session.detach` +- `session.status` +- `session.list` +- `session.history` +- `terminal.open` +- `terminal.read` +- `terminal.write` + +### New or expanded amux RPC + +#### `amux.capture` + +Input: +- `session_id` or `pane_id` +- `history` optional bool, default `true` + +Output: +- `pane_id` +- `session_id` +- `capture.cols` +- `capture.rows` +- `capture.cursor_x` +- `capture.cursor_y` +- `capture.history` +- `capture.visible` +- `closed` +- `offset` +- `base_offset` + +#### `amux.events.read` + +Input: +- `cursor` +- `timeout_ms` +- `filters` optional array of kinds +- `session_id` 
optional +- `pane_id` optional + +Output: +- `cursor` for the next read +- `events[]` + +Event kinds required in v1: +- `session.open` +- `session.close` +- `session.attach` +- `session.resize` +- `session.detach` +- `window.open` +- `window.close` +- `pane.open` +- `pane.close` +- `pane.output` +- `busy` +- `idle` +- `exited` + +#### `amux.wait` + +Input: +- `kind` +- `session_id` or `pane_id` +- `timeout_ms` + +Additional input by kind: +- `signal`: `name`, optional `after_generation` +- `content`: `needle` + +Supported wait kinds in v1: +- `signal` +- `content` +- `busy` +- `idle` +- `ready` +- `exited` + +Output: +- `signal`: `{ "name", "generation" }` +- `content`: `{ "matched": true }` +- `busy`: `{ "busy": true }` +- `idle`: `{ "idle": true }` +- `ready`: `{ "ready": true }` +- `exited`: `{ "exited": true }` + +## tmux Compatibility + +### Transport + +Expose tmux compatibility as: +- `tmux.exec` + +Input: +- `{ "argv": ["command", "...args"] }` + +Output: +- `stdout` +- command-specific fields when useful, such as `session_id`, `window_id`, `pane_id`, `buffer`, `path`, `cols`, `rows`, `generation` + +### Supported tmux commands for v1 + +- `new-session` +- `new-window` +- `split-window` +- `select-window` +- `select-pane` +- `kill-window` +- `kill-pane` +- `send-keys` +- `capture-pane` +- `display-message` +- `list-windows` +- `list-panes` +- `rename-window` +- `resize-pane` +- `wait-for` +- `last-pane` +- `last-window` +- `next-window` +- `previous-window` +- `has-session` +- `set-buffer` +- `show-buffer` +- `save-buffer` +- `list-buffers` +- `paste-buffer` +- `pipe-pane` +- `find-window` +- `respawn-pane` + +The older cmux subset from `CLI/cmux.swift` must remain included inside this list. 
+ +### Target Syntax Required In v1 + +- session id: `name` or `$name` +- window target: `session:window`, `@window-id`, bare window index +- pane target: `session:window.pane`, `%pane-id`, bare pane index in the active window +- commands that accept pane targets must also accept a window target and use that window's active pane + +### Format Variables Required In v1 + +- `#{session_name}` +- `#{session_id}` +- `#{window_id}` +- `#{window_name}` +- `#{window_index}` +- `#{window_active}` +- `#{pane_id}` +- `#{pane_index}` +- `#{pane_active}` +- `#{pane_title}` +- `#{pane_current_path}` +- `#{pane_current_command}` + +### Behavioral Notes + +- `wait-for` is implemented as named signal generation tracking, not tmux control mode +- `capture-pane -p` prints captured text, otherwise stores the text in the default buffer +- `set-buffer` and `paste-buffer` operate on daemon-owned buffers +- `pipe-pane` runs a shell command and pipes the current pane capture to stdin, so it is only safe for trusted callers +- `resize-pane` is direct PTY resizing, not a real tmux layout engine +- `respawn-pane` recreates the pane process in place + +## Acceptance For V1 + +V1 is acceptable when all of the following are true: + +1. `cargo build` succeeds with `GHOSTTY_SOURCE_DIR` pointed at the worktree Ghostty checkout. +2. The daemon serves over a Unix socket and the existing cmux RPC surface still works. +3. `amux.capture`, `amux.events.read`, and `amux.wait` work for real panes. +4. The approved tmux command subset works through `tmux.exec`. +5. Common commands are validated against a live PTY smoke run, not only compile-time checks. diff --git a/docs/local-rust-pty-migration-plan.md b/docs/local-rust-pty-migration-plan.md new file mode 100644 index 000000000..10574d9d5 --- /dev/null +++ b/docs/local-rust-pty-migration-plan.md @@ -0,0 +1,491 @@ +# Local Rust PTY Migration Plan + +This plan replaces the current local macOS child-process adapter with direct Swift-to-Rust socket transport. 
+ +## Decision + +Use two Unix sockets. + +- App socket: Swift UI/control plane +- Rust daemon socket: terminal and tmux/amux/PTY data plane + +This is already the direction of the codebase: + +- app socket env is `CMUX_SOCKET_PATH` in [GhosttyTerminalView.swift](/Users/lawrence/fun/cmuxterm-hq/worktrees/feat-amux-rust-backend/Sources/GhosttyTerminalView.swift#L2920) +- daemon socket env is `CMUXD_UNIX_PATH` in [Workspace.swift](/Users/lawrence/fun/cmuxterm-hq/worktrees/feat-amux-rust-backend/Sources/Workspace.swift#L5457) + +## Branch target + +This migration is for the branch that targets `task-move-ios-app-into-cmux-repo`. + +It should not be merged by this task. "Done" here means: + +- implemented in this feature branch +- tested in this feature branch +- ready to merge into `task-move-ios-app-into-cmux-repo` + +## Current problem + +Local macOS terminal surfaces are still provisioned by spawning a child command: + +- [Workspace.swift](/Users/lawrence/fun/cmuxterm-hq/worktrees/feat-amux-rust-backend/Sources/Workspace.swift#L311) +- [Workspace.swift](/Users/lawrence/fun/cmuxterm-hq/worktrees/feat-amux-rust-backend/Sources/Workspace.swift#L5776) + +That child command is: + +```sh +cmuxd-remote amux new <surface-id> --socket <daemon-socket> -- <shell-command> +``` + +This is the part we should remove. + +## Key constraint + +We should not guess about Ghostty. 
+ +The good news is the embedded Ghostty API already supports manual I/O: + +- [ghostty.h](/Users/lawrence/fun/cmuxterm-hq/worktrees/feat-amux-rust-backend/ghostty/include/ghostty.h#L6) +- [ghostty.h](/Users/lawrence/fun/cmuxterm-hq/worktrees/feat-amux-rust-backend/ghostty/include/ghostty.h#L441) +- [ghostty.h](/Users/lawrence/fun/cmuxterm-hq/worktrees/feat-amux-rust-backend/ghostty/include/ghostty.h#L1102) + +And iOS already uses it today: + +- sets `io_mode = GHOSTTY_SURFACE_IO_MANUAL` and `io_write_cb` in [GhosttySurfaceView.swift](/Users/lawrence/fun/cmuxterm-hq/worktrees/feat-amux-rust-backend/ios/Sources/Terminal/GhosttySurfaceView.swift#L898) +- feeds remote output with `ghostty_surface_process_output` in [GhosttySurfaceView.swift](/Users/lawrence/fun/cmuxterm-hq/worktrees/feat-amux-rust-backend/ios/Sources/Terminal/GhosttySurfaceView.swift#L596) + +macOS is still using exec mode with `command` and `working_directory`: + +- [GhosttyTerminalView.swift](/Users/lawrence/fun/cmuxterm-hq/worktrees/feat-amux-rust-backend/Sources/GhosttyTerminalView.swift#L3495) +- [GhosttyTerminalView.swift](/Users/lawrence/fun/cmuxterm-hq/worktrees/feat-amux-rust-backend/Sources/GhosttyTerminalView.swift#L3568) + +So the right plan is to port the iOS manual-I/O pattern to macOS for local Rust-backed surfaces. + +## Required end state + +When this migration is done: + +1. Creating a local terminal surface does not spawn `cmuxd-remote amux new ...`. +2. `Cmd+N`, `cmux new-workspace`, new splits, new surfaces, restored workspaces, and any other local terminal creation path all provision through direct Rust RPC. +3. macOS terminal input goes to Rust over the daemon socket, not through a child shell command wrapper. +4. macOS terminal output comes from Rust over the daemon socket and is pushed into Ghostty with manual surface I/O. +5. resize, detach, close, EOF, and exit are handled by the direct transport. +6. `cmux pty` works and is a thin Swift forwarder to Rust. +7. 
the old local child-process adapter path is removed, not merely bypassed in one code path. + +## Hard acceptance gates + +This work is not done unless every gate below is true at the same time. + +1. There is exactly one local macOS PTY transport path, direct Swift to Rust over the daemon socket. +2. No local workspace or pane creation path shells out to `cmuxd-remote amux new ...`. +3. `cmux pty` forwards to Rust and does not implement a second PTY model in Swift. +4. The same Rust session is exercised by app UI, `cmux pty`, and tmux/amux calls. +5. The old local adapter code is deleted after cutover, not left behind as a silent fallback. +6. Tagged macOS dogfood works for new workspace, split, restore, resize, type, close, EOF, and exit. +7. Automated tests cover the direct path and run in CI. +8. A tmux-ground-truth comparison suite exists for the agreed common tmux subset and passes against both real `tmux` and our Rust backend. +9. A TUI and resize suite exists and passes for the direct Rust PTY path, including `cmux pty`. +10. CI runs the tmux-ground-truth and TUI/resize suites and blocks handoff if either fails. + +If any one of those is false, this migration is incomplete. 
+ +## Things that do not count as done + +These are the lazy versions of the migration and should be rejected: + +- adding a new direct path but leaving the old child bootstrap active for some local creation flows +- making `cmux pty` work by talking to Swift-only state instead of forwarding to Rust +- leaving Swift and Rust with separate PTY lifecycle logic for attach, read, write, resize, or exit +- proving only `Cmd+N` while splits, restore, or CLI flows still use the old path +- relying on manual spot checks without CI coverage for the new path +- keeping a hidden emergency fallback to exec-mode local startup for normal macOS terminals +- claiming tmux parity from eyeballing a terminal instead of from side-by-side assertions against real `tmux` +- testing only line-oriented shell commands while skipping fullscreen TUIs, alternate screen, and resize behavior +- comparing one happy-path pane while skipping window navigation, buffers, `wait-for`, `pipe-pane`, and respawn or kill flows +- adding parity tests that do not actually run in CI + +## Implementation plan + +### 1. Build a macOS manual-I/O terminal bridge + +Add a macOS equivalent of the iOS `GhosttySurfaceBridge` pattern inside [GhosttyTerminalView.swift](/Users/lawrence/fun/cmuxterm-hq/worktrees/feat-amux-rust-backend/Sources/GhosttyTerminalView.swift). + +It must: + +- create selected surfaces with `GHOSTTY_SURFACE_IO_MANUAL` +- install `io_write_cb` +- forward outbound bytes to a Swift delegate or bridge object +- expose an API to feed inbound bytes via `ghostty_surface_process_output` +- keep existing text input behavior intact for manual surfaces + +Done means: + +- a macOS terminal surface can exist with no `command` and no `working_directory` +- user keystrokes still produce outbound bytes +- injected output still renders in the surface + +### 2. Add a local Rust session controller in Swift + +Create a dedicated Swift-side controller for local Rust-backed sessions. 
+ +It must own: + +- daemon socket discovery +- `terminal.open` +- `terminal.read` +- `terminal.write` +- `session.resize` +- `session.detach` +- `session.status` or equivalent close-state polling if needed + +It should reuse the direct JSON-RPC style already present in [Workspace.swift](/Users/lawrence/fun/cmuxterm-hq/worktrees/feat-amux-rust-backend/Sources/Workspace.swift#L1053), but for local daemon Unix sockets instead of the remote SSH transport wrapper. + +Done means: + +- one Swift object can open a Rust session for a specific `surface.id` +- one read loop continuously feeds Ghostty output +- one write path sends bytes back to Rust + +### 3. Bind `surface.id` to Rust `session_id` + +Keep the clean identity rule: + +- local terminal `session_id == surface.id` + +That is already how the current child-process path behaves in [Workspace.swift](/Users/lawrence/fun/cmuxterm-hq/worktrees/feat-amux-rust-backend/Sources/Workspace.swift#L5777). + +Done means: + +- any local panel can deterministically resolve its Rust session ID without lookup hacks + +### 4. Replace local startup provisioning + +Remove macOS local terminal startup from `LocalTerminalDaemonBridge.startupCommand(...)` for local Rust-backed surfaces. + +Instead: + +- create the Ghostty surface in manual mode +- call `terminal.open` directly against the Rust daemon socket +- start the Swift read loop immediately + +This applies to: + +- initial workspace terminal +- new terminal surface in pane +- split terminal surface +- restored local terminal surfaces + +Done means: + +- these code paths no longer depend on `startupCommandOverride` or shelling out to `cmuxd-remote amux new` + +### 5. 
Wire terminal lifecycle fully + +The direct bridge must handle: + +- initial open +- steady-state read +- write from UI input +- resize on surface size changes +- detach on close +- EOF and exit propagation +- close cleanup if daemon dies + +Done means: + +- closing a pane or workspace detaches and cleans up the Rust session +- daemon EOF closes the terminal cleanly +- resizing a pane updates Rust session size + +### 6. Keep app socket and Rust socket responsibilities separate + +Do not tunnel PTY traffic through the app socket. + +Use: + +- app socket for workspace, pane, focus, browser, notifications, and UI selection +- Rust daemon socket for terminal bytes, tmux session behavior, amux behavior, and `cmux pty` + +Done means: + +- app socket APIs do not become a hidden PTY proxy layer + +### 7. Implement `cmux pty` as a thin forwarder + +Add a `cmux pty` command in [cmux.swift](/Users/lawrence/fun/cmuxterm-hq/worktrees/feat-amux-rust-backend/CLI/cmux.swift). + +It should: + +- resolve target workspace, pane, surface, and daemon socket from Swift app state +- resolve `session_id` +- forward to Rust for attach, read, write, resize, and wait behavior + +It must not reimplement PTY semantics locally. + +Done means: + +- Swift CLI becomes control-plane resolution plus forwarding +- PTY semantics live in Rust + +### 8. Migrate PTY-shaped tmux compatibility behavior out of Swift + +Move or forward the parts of tmux compatibility that are really terminal-session behavior: + +- attach-like PTY flows +- capture and wait where Rust already owns the session state +- buffer and pipe behaviors if they depend on terminal session semantics + +Do not leave split ownership where Swift has a second local tmux model for PTY features. + +Done means: + +- no duplicate PTY semantics in Swift for the migrated subset + +### 9. 
Delete the old local child-process adapter + +After the direct path works, remove the old local path for macOS local terminals: + +- no local `sh -c "cmuxd-remote amux new ..."` startup path +- no hidden fallback for local surfaces + +Remote workspace transport can remain separate if it still legitimately uses other startup semantics, but local macOS terminals should not. + +Done means: + +- process tree inspection during local workspace creation shows no `cmuxd-remote amux new ...` child terminal bootstrap + +## Verification plan + +This migration is not done because one or two commands work. It is done when the direct Rust path survives the same command and TUI workloads that people actually use, and when those checks run automatically. + +### A. Real tmux as the comparison oracle + +For the common tmux subset, real `tmux` should be treated as close to ground truth. + +The plan is: + +1. build one parity harness that can drive the same scenario against: + - real `tmux` + - our Rust backend through `cmuxd-remote tmux exec` +2. normalize only volatile fields: + - UUIDs + - generated pane or window ids + - timestamps + - socket paths + - host-specific cwd prefixes where needed +3. compare the rest directly: + - command success or failure + - pane and window topology + - selected pane and window + - captured terminal text + - buffer contents + - wait semantics + - resize-visible state + +The suite must fail loudly on behavior drift. It should not silently accept major differences because normalization was too broad. + +### B. 
Common tmux command matrix + +These command families need explicit parity coverage against real `tmux`: + +- session and window lifecycle: + - `new-session` + - `has-session` + - `new-window` + - `kill-window` + - `rename-window` + - `last-window` + - `next-window` + - `previous-window` +- pane lifecycle and navigation: + - `split-window` + - `select-window` + - `select-pane` + - `last-pane` + - `resize-pane` + - `kill-pane` + - `respawn-pane` +- terminal I/O and capture: + - `send-keys` + - `capture-pane` + - `display-message` + - `pipe-pane` +- discovery: + - `list-windows` + - `list-panes` + - `find-window` +- synchronization and clipboard-like behavior: + - `wait-for` + - `set-buffer` + - `show-buffer` + - `save-buffer` + - `list-buffers` + - `paste-buffer` + +For each command family, tests should assert both: + +- the immediate command result +- the resulting session state after the command + +### C. TUI matrix + +The direct Rust PTY path must be exercised with interactive programs, not just shell prompts. + +At minimum, the suite must cover: + +- shell prompt baseline: + - prompt appears + - commands echo and render correctly + - scrollback capture remains sane +- fullscreen or alternate-screen behavior: + - a real TUI if available in CI, or a repo-owned fixture TUI if not + - enter alternate screen + - render updates + - exit alternate screen cleanly +- editor or pager behavior: + - `vim -Nu NONE` or equivalent if available + - otherwise a repo-owned minimal fullscreen editor fixture +- long-running output: + - repeated writes + - capture after output growth + - no truncation or stuck offset handling +- `cmux pty` attach path: + - attach to an existing session + - observe prior output + - write input through the CLI + - detach cleanly + +The plan should prefer pinned fixture TUIs in the repo where host tools are not guaranteed. We should not build a suite that only works on one developer laptop. + +### D. 
Resize matrix + +Resize needs its own coverage because it breaks terminal apps in ways simple shell tests will not catch. + +The suite must cover: + +- initial open size +- repeated grow and shrink cycles +- split-pane resize behavior +- `cmux pty` `SIGWINCH` propagation +- resize while a fullscreen TUI is active +- resize after detach and reattach + +Assertions should include: + +- terminal-reported rows and columns +- capture-pane output that reflects new wrapping +- continued TUI rendering after resize + +### E. CI gates + +These suites need named CI coverage, not an aspirational note. + +CI should include at least: + +- `remote-daemon-tmux-parity` + - installs or uses real `tmux` + - runs the common-command parity harness +- `remote-daemon-tui-resize` + - runs the direct PTY, TUI, and resize matrix +- both jobs upload parity diffs, captures, and logs on failure + +The tmux baseline run is part of the gate. If the baseline `tmux` run itself fails, the job should fail instead of skipping the comparison. + +### F. Execution standard + +There is no lazy version of this verification. + +Before calling this ready to merge into `task-move-ios-app-into-cmux-repo`, we need all of these to be true: + +1. the parity suite exists +2. the TUI and resize suite exists +3. both suites were executed on this branch +4. both suites passed +5. both suites run in CI and are green + +If any of those are false, the work is not ready. + +## Handoff gate + +Do not call this ready on "mostly migrated" status. + +Only call this ready to merge into `task-move-ios-app-into-cmux-repo` when all of this is true: + +1. The direct manual-I/O path is the only local macOS terminal path in production code. +2. `cmux pty` is wired and verified against the live tagged app. +3. The old local child bootstrap code is removed. +4. CI includes the new direct-path coverage and is green. +5. Real `tmux` parity coverage for the agreed common subset is green. +6. 
TUI and resize coverage for the direct Rust path is green. +7. Manual dogfood on the tagged app passes the behavior checklist below. +8. The final status reported to the user is "ready to merge into `task-move-ios-app-into-cmux-repo`", not "merged". + +## Test plan + +This migration is only done if these all pass. + +### Behavior tests + +Verify all of these on the tagged macOS app: + +- `Cmd+N` +- `cmux new-workspace` +- split terminal +- new surface in existing pane +- restored workspace from saved session +- typing, paste, resize, close +- long-running output +- EOF and exit handling + +### CLI tests + +Verify: + +- `cmux pty ...` against a live tagged app +- tmux subset commands that rely on the same local Rust session IDs + +### Negative proof + +Verify process tree during local workspace creation: + +- Rust daemon `serve --unix` exists +- no `cmuxd-remote amux new ...` child process is created for local terminal startup + +### CI + +Add or update automated coverage so CI proves: + +- manual-I/O macOS surfaces still build +- local Rust daemon startup path works without child bootstrap +- `cmux pty` path works +- existing remote-daemon tests still pass + +### Exit checklist + +Before calling this finished, explicitly confirm all of these: + +- `Cmd+N` uses direct Rust provisioning +- `cmux new-workspace` uses direct Rust provisioning +- split/new pane uses direct Rust provisioning +- restored local workspaces use direct Rust provisioning +- `cmux pty` uses the same Rust session path +- no local child bootstrap remains in code or process tree +- CI is green on the new path + +## Non-goals for the first migration + +These are not excuses to leave the local path half-done. 
They are simply outside this specific migration: + +- replacing the app socket with the Rust socket +- rewriting browser or notification flows to Rust +- changing remote SSH workspace transport unless needed by shared abstractions + +## Acceptance rule + +Do not call this finished until: + +- the local child-process bootstrap is gone +- the direct Swift-to-Rust PTY path is the only local macOS terminal path +- `cmux pty` works +- app creation flows and CLI flows are tested end to end diff --git a/docs/pty-cli-architecture.md b/docs/pty-cli-architecture.md new file mode 100644 index 000000000..df759564c --- /dev/null +++ b/docs/pty-cli-architecture.md @@ -0,0 +1,220 @@ +# PTY CLI Architecture + +This is the current split between Swift and Rust on `feat-amux-rust-backend`, and the boundary we should move to if `cmux pty` is going to work cleanly. + +## Short answer + +Yes, this makes sense. + +Socket events alone are not enough for a real PTY CLI. A PTY client needs a bidirectional data plane: + +- create or open a session +- attach +- read terminal bytes +- write terminal bytes +- resize on `SIGWINCH` +- detach +- detect EOF and exit + +Rust already has most of that. Swift still owns a lot of control-plane and tmux-compat glue. 
+ +## Current architecture + +```mermaid +flowchart LR + subgraph SwiftApp["Swift app"] + UI["Workspace / TerminalSurface / panes / windows"] + AppSock["TerminalController app socket"] + Bridge["LocalTerminalDaemonBridge"] + end + + subgraph RustDaemon["Rust cmuxd-remote"] + Serve["serve --unix"] + Amux["amux / tmux / session RPC"] + Pty["PTY + terminal session state"] + end + + subgraph SwiftCLI["Swift CLI"] + CLI["cmux"] + Compat["tmux-compat logic in CLI/cmux.swift"] + end + + CLI --> AppSock + Compat --> AppSock + UI --> Bridge + Bridge -->|spawn| Serve + Bridge -->|amux new <surface.id>| Amux + Serve --> Amux + Amux --> Pty +``` + +## What is in Rust right now + +Rust already owns the terminal session transport: + +- `terminal.open` +- `terminal.read` +- `terminal.write` +- `session.attach` +- `session.resize` +- `session.detach` +- `session.status` +- `session.list` +- `session.history` +- `amux.capture` +- `amux.wait` +- `amux.events.read` +- `tmux.exec` + +The interactive Rust CLI path already uses those calls end to end: + +- `cmuxd-remote session new` +- `cmuxd-remote session attach` +- raw terminal mode +- `SIGWINCH` resize propagation +- continuous `terminal.read` +- stdin to `terminal.write` + +So the PTY loop is not theoretical. It already exists in Rust. 
+ +## What is still in Swift right now + +The Swift app still owns UI and app-local control: + +- windows +- workspaces +- panes +- focus and selection +- app socket API in `TerminalController` + +The Swift CLI still owns a lot of tmux-compat behavior in `CLI/cmux.swift`: + +- it connects to the app socket with `SocketClient` +- it resolves current workspace and pane from app state +- it implements many tmux commands by calling app socket methods like `workspace.create`, `workspace.rename`, `surface.split`, `surface.send_text`, `surface.read_text`, `pane.list` +- some compatibility features are still purely local Swift CLI behavior, not Rust daemon behavior: + - `pipe-pane` shells out locally after `surface.read_text` + - `wait-for` uses filesystem signal files + - buffers and hooks live in `~/.cmuxterm/tmux-compat-store.json` + +That means Swift CLI currently does more than forwarding. + +## Important current coupling + +The app already launches local terminal panels through the Rust daemon: + +- `Workspace.makeTerminalPanel(...)` calls `LocalTerminalDaemonBridge.startupCommand(...)` +- that runs `cmuxd-remote amux new <sessionID> --socket <socket> -- <command>` +- today `sessionID` is the terminal surface UUID, so `session_id == surface.id` + +That is useful because it gives us a stable bridge between app pane identity and Rust session identity. + +## Current control plane vs PTY plane + +| Area | Current owner | +| --- | --- | +| App windows, panes, focus, selection | Swift app | +| App Unix socket API | Swift app | +| Local daemon startup and discovery | Swift app | +| PTY lifecycle and terminal byte stream | Rust daemon | +| Interactive attach loop | Rust daemon | +| `amux` capture, wait, events | Rust daemon | +| `tmux.exec` subset | Rust daemon | +| tmux-compat command parsing and fallback behaviors | Swift CLI | + +## Why socket events are not enough + +`amux.events.read` is useful for state change notification, but it does not replace a PTY stream. 
+ +A usable `cmux pty` command needs all of these at minimum: + +- an interactive attach path +- streaming reads with offsets or backpressure +- writes for stdin bytes +- resize propagation +- detach semantics +- exit or EOF semantics + +Events are side-band signals. PTY attach is the main data plane. + +## Desired architecture + +```mermaid +flowchart LR + subgraph SwiftCLI["Swift CLI"] + Thin["cmux thin wrapper"] + end + + subgraph SwiftApp["Swift app"] + AppSock["TerminalController app socket"] + UI["Windows / workspaces / panes"] + end + + subgraph RustDaemon["Rust cmuxd-remote"] + Rpc["session / terminal / amux / tmux"] + Pty["PTY transport and tmux behavior"] + end + + Thin -->|control-plane lookup only| AppSock + Thin -->|PTY attach + tmux forwarding| Rpc + UI --> AppSock + Rpc --> Pty +``` + +## What should move to Rust + +If we want `cmux pty` to be real and for the Swift CLI to mostly forward, Rust should own: + +- PTY attach and detach +- PTY read and write +- resize handling +- capture and wait +- exit status and EOF +- tmux compatibility that is actually about terminal sessions +- buffer, wait, and pipe behaviors that should match the daemon session model + +Swift should keep: + +- app UI state +- focus and selection +- current workspace and pane discovery +- non-terminal app features like browser, notifications, window management + +## Minimal forwarding boundary + +The clean boundary is: + +1. Swift resolves which pane or surface the user means. +2. Swift resolves the daemon socket and Rust `session_id`. +3. Swift forwards the PTY operation to Rust. + +For the local app path, step 2 is already close to trivial because `session_id` is the surface UUID. + +## Concrete implication for `cmux pty` + +I think `cmux pty` should not be implemented as another Swift-side pseudo-terminal feature. 
+ +It should be a thin path over Rust operations, probably one of: + +- Swift CLI shells out to the Rust daemon binary for interactive subcommands +- Swift CLI talks directly to the Rust daemon socket for PTY subcommands + +Either way, the Swift CLI should stop owning PTY semantics. + +## Gaps before that architecture is true + +These are the main things still not aligned: + +- Swift CLI still implements tmux-compat itself against the app socket +- some tmux-compat features are local-only Swift behavior, not daemon behavior +- there is no dedicated `cmux pty` forwarding surface yet +- the app socket is still the source of truth for current pane selection, but the Rust daemon is the source of truth for terminal bytes + +## Recommendation + +Do the next pass in this order: + +1. Add a small Swift control-plane lookup that returns `workspace_id`, `surface_id`, `session_id`, and daemon socket for the current or requested pane. +2. Add `cmux pty ...` in Swift as a thin forwarder to Rust. +3. Move tmux-compat commands that are really PTY or tmux session behavior out of `CLI/cmux.swift` and into Rust. +4. Keep only app-specific commands in Swift. 
diff --git a/ios/Sources/Terminal/GhosttySurfaceView.swift b/ios/Sources/Terminal/GhosttySurfaceView.swift index 8b4f9e608..e28698c69 100644 --- a/ios/Sources/Terminal/GhosttySurfaceView.swift +++ b/ios/Sources/Terminal/GhosttySurfaceView.swift @@ -777,7 +777,7 @@ final class GhosttySurfaceView: UIView, TerminalSurfaceHosting { guard let surface else { return } cursorBlinkVisible = true lastBlinkToggle = CACurrentMediaTime() - ghostty_surface_set_cursor_blink_visible(surface, true) + ghostty_surface_refresh(surface) } @objc func handleDisplayLinkFire() { @@ -787,12 +787,11 @@ final class GhosttySurfaceView: UIView, TerminalSurfaceHosting { if now - lastBlinkToggle >= 0.5 { cursorBlinkVisible.toggle() lastBlinkToggle = now - ghostty_surface_set_cursor_blink_visible(surface, cursorBlinkVisible) blinkChanged = true } if needsDraw || blinkChanged { needsDraw = false - ghostty_surface_update_and_draw(surface) + ghostty_surface_refresh(surface) } } diff --git a/ios/Sources/Terminal/LiveAnchormuxSupport.swift b/ios/Sources/Terminal/LiveAnchormuxSupport.swift index e608d6006..33d6b708e 100644 --- a/ios/Sources/Terminal/LiveAnchormuxSupport.swift +++ b/ios/Sources/Terminal/LiveAnchormuxSupport.swift @@ -881,6 +881,8 @@ private final class LiveAnchormuxFixtureTransport: @unchecked Sendable, Terminal liveAnchormuxLog("transport.event notice session=\(sessionID) message=\(message)") case .trustedHostKey(let hostKey): liveAnchormuxLog("transport.event trusted_host_key session=\(sessionID) key=\(hostKey)") + case .remotePlatform(let platform): + liveAnchormuxLog("transport.event remote_platform session=\(sessionID) os=\(platform.goOS) arch=\(platform.goArch)") } if let snapshotting = transport as? 
TerminalRemoteDaemonResumeStateSnapshotting { updateResumeState(snapshotting.remoteDaemonResumeStateSnapshot()) diff --git a/ios/Sources/Terminal/TerminalSidebarStore.swift b/ios/Sources/Terminal/TerminalSidebarStore.swift index 4f0c6e935..12b7a9a0d 100644 --- a/ios/Sources/Terminal/TerminalSidebarStore.swift +++ b/ios/Sources/Terminal/TerminalSidebarStore.swift @@ -1204,6 +1204,8 @@ final class TerminalSessionController: ObservableObject { liveAnchormuxLog("controller.event notice message=\(message)") case .trustedHostKey(let hostKey): liveAnchormuxLog("controller.event trusted_host_key key=\(hostKey)") + case .remotePlatform(let platform): + liveAnchormuxLog("controller.event remote_platform os=\(platform.goOS) arch=\(platform.goArch)") } } switch event { @@ -1235,6 +1237,8 @@ final class TerminalSessionController: ObservableObject { setStatusMessage(message) case .trustedHostKey(let hostKey): onUpdate?(.trustedHostKey(hostKey)) + case .remotePlatform(let platform): + terminalSurface?.updateRemotePlatform(platform) } } diff --git a/scripts/build-ghostty-cli-helper.sh b/scripts/build-ghostty-cli-helper.sh index 3d82322b0..e0e28fbaa 100755 --- a/scripts/build-ghostty-cli-helper.sh +++ b/scripts/build-ghostty-cli-helper.sh @@ -99,6 +99,7 @@ build_helper() { -Dapp-runtime=none -Demit-macos-app=true -Demit-xcframework=false + -Dxcframework-target=native -Doptimize=ReleaseFast --prefix "$prefix" diff --git a/scripts/build_remote_daemon_release_assets.sh b/scripts/build_remote_daemon_release_assets.sh index eb3a9d77a..5d062a74d 100755 --- a/scripts/build_remote_daemon_release_assets.sh +++ b/scripts/build_remote_daemon_release_assets.sh @@ -65,6 +65,18 @@ fi SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" REPO_ROOT="$(cd "${SCRIPT_DIR}/.." && pwd)" DAEMON_ROOT="${REPO_ROOT}/daemon/remote/zig" + +for required_path in \ + "${REPO_ROOT}/ghostty/build.zig.zon" \ + "${REPO_ROOT}/vendor/tls.zig/build.zig.zon" +do + if [[ ! 
-f "$required_path" ]]; then + echo "error: missing build dependency at $required_path" >&2 + echo "hint: initialize submodules with 'git submodule update --init --recursive'" >&2 + exit 1 + fi +done + mkdir -p "$OUTPUT_DIR" OUTPUT_DIR="$(cd "$OUTPUT_DIR" && pwd)" rm -f "$OUTPUT_DIR"/cmuxd-remote-* "$OUTPUT_DIR"/cmuxd-remote-checksums.txt "$OUTPUT_DIR"/cmuxd-remote-manifest.json diff --git a/scripts/ghosttykit-checksums.txt b/scripts/ghosttykit-checksums.txt index cb339e001..3334ec1c5 100644 --- a/scripts/ghosttykit-checksums.txt +++ b/scripts/ghosttykit-checksums.txt @@ -9,3 +9,4 @@ c47010b80cd9ae6d1ab744c120f011a465521ea3 d6904870a3c920b2787b1c4b950cfdef232606b 404a3f175ba6baafabc46cac807194883e040980 bcbd2954f4746fe5bcb4bfca6efeddd3ea355fda2836371f4c7150271c58acbd bc9be90a21997a4e5f06bf15ae2ec0f937c2dc42 6b83b66768e8bba871a3753ae8ffbaabd03370b306c429cd86c9cdcc8db82589 727197d2c0ecb160d496837467933d49614c9a98 399846587c18c55a23b8c86186eaac57511025084c26d743817c34b8a4ed1950 +bc0ee3142fe661f7342a9b76d712a417d59d5aae 073ea7f8ee5f889b3208365942373b53fa9cd71d0406d4599f7f15e43917394e diff --git a/scripts/launch-tagged-automation.sh b/scripts/launch-tagged-automation.sh index 2262e3d5d..8c79116c6 100755 --- a/scripts/launch-tagged-automation.sh +++ b/scripts/launch-tagged-automation.sh @@ -2,7 +2,7 @@ set -euo pipefail SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" -source "$SCRIPT_DIR/zig-build-env.sh" +ROOT="$(cd "$SCRIPT_DIR/.." && pwd)" usage() { cat <<'EOF' @@ -125,15 +125,17 @@ BID="com.cmuxterm.app.debug.${TAG_ID}" SOCK="/tmp/cmux-debug-${TAG_SLUG}.sock" DSOCK="$HOME/Library/Application Support/cmux/cmuxd-dev-${TAG_SLUG}.sock" LOG="/tmp/cmux-debug-${TAG_SLUG}.log" -DAEMON_BIN="$PWD/daemon/remote/zig/zig-out/bin/cmuxd-remote" +DAEMON_MANIFEST="$ROOT/daemon/remote/rust/Cargo.toml" +DAEMON_BIN="$ROOT/daemon/remote/rust/target/debug/cmuxd-remote" if [[ ! 
-d "$APP" ]]; then echo "error: tagged app not found at $APP" >&2 exit 1 fi -if [[ -d "$PWD/daemon/remote/zig" ]]; then - (cd "$PWD/daemon/remote/zig" && cmux_run_zig build -Doptimize=ReleaseFast) +if [[ -f "$DAEMON_MANIFEST" ]]; then + GHOSTTY_SOURCE_DIR="${GHOSTTY_SOURCE_DIR:-$ROOT/ghostty}" \ + cargo build --manifest-path "$DAEMON_MANIFEST" >/dev/null fi /usr/bin/osascript -e "tell application id \"${BID}\" to quit" >/dev/null 2>&1 || true diff --git a/scripts/open-desktop-ios-anchormux-live.sh b/scripts/open-desktop-ios-anchormux-live.sh index 4255e0df1..a56d014e2 100755 --- a/scripts/open-desktop-ios-anchormux-live.sh +++ b/scripts/open-desktop-ios-anchormux-live.sh @@ -158,7 +158,7 @@ if [[ -z "$WORKSPACE_ID" || -z "$SURFACE_ID" ]]; then exit 1 fi -DAEMON_BIN="$ROOT/daemon/remote/zig/zig-out/bin/cmuxd-remote" +DAEMON_BIN="$ROOT/daemon/remote/rust/target/debug/cmuxd-remote" if ! "$DAEMON_BIN" amux status "$SURFACE_ID" --socket "$DAEMON_SOCKET" >/dev/null 2>&1; then deadline=$((SECONDS + 20)) while (( SECONDS < deadline )); do diff --git a/scripts/reload.sh b/scripts/reload.sh index a89bbd610..1d01479d2 100755 --- a/scripts/reload.sh +++ b/scripts/reload.sh @@ -281,9 +281,11 @@ if [[ -n "$TAG" ]]; then fi fi -LOCAL_REMOTE_DAEMON_BIN="$PWD/daemon/remote/zig/zig-out/bin/cmuxd-remote" -if [[ -n "${TAG_SLUG:-}" && -d "$PWD/daemon/remote/zig" ]]; then - (cd "$PWD/daemon/remote/zig" && cmux_run_zig build -Doptimize=ReleaseFast) +LOCAL_REMOTE_DAEMON_MANIFEST="$PWD/daemon/remote/rust/Cargo.toml" +LOCAL_REMOTE_DAEMON_BIN="$PWD/daemon/remote/rust/target/debug/cmuxd-remote" +if [[ -f "$LOCAL_REMOTE_DAEMON_MANIFEST" ]]; then + GHOSTTY_SOURCE_DIR="${GHOSTTY_SOURCE_DIR:-$PWD/ghostty}" \ + cargo build --manifest-path "$LOCAL_REMOTE_DAEMON_MANIFEST" fi XCODEBUILD_ARGS=( diff --git a/scripts/verify-desktop-ios-anchormux-sharing.sh b/scripts/verify-desktop-ios-anchormux-sharing.sh index db9d4c00c..9d9e2f6b3 100644 --- a/scripts/verify-desktop-ios-anchormux-sharing.sh +++ 
b/scripts/verify-desktop-ios-anchormux-sharing.sh @@ -25,7 +25,7 @@ RELAY_LOG="/tmp/cmux-desktop-ios-anchormux-${SANITIZED_TAG}-relay.log" TEST_LOG="/tmp/cmux-desktop-ios-anchormux-${SANITIZED_TAG}-ios.log" RELOAD_LOG="/tmp/cmux-desktop-ios-anchormux-${SANITIZED_TAG}-reload.log" CONFIG_PATH="/tmp/cmux-live-anchormux-${SANITIZED_TAG}.json" -DAEMON_BIN="$ROOT/daemon/remote/zig/zig-out/bin/cmuxd-remote" +DAEMON_BIN="$ROOT/daemon/remote/rust/target/debug/cmuxd-remote" RELAY_PID="" TEST_PID="" diff --git a/scripts/verify-local-daemon-session-sharing.sh b/scripts/verify-local-daemon-session-sharing.sh index 0b2ef9095..5c2ab2cbf 100755 --- a/scripts/verify-local-daemon-session-sharing.sh +++ b/scripts/verify-local-daemon-session-sharing.sh @@ -5,7 +5,7 @@ usage() { cat <<'EOF' Usage: ./scripts/verify-local-daemon-session-sharing.sh <tag> -Builds the Zig daemon, launches the tagged cmux app with local-daemon wiring +Builds the Rust daemon, launches the tagged cmux app with local-daemon wiring enabled, and verifies the app auto-starts cmuxd-remote when a local terminal session is created. EOF @@ -18,7 +18,6 @@ fi TAG="$1" ROOT="$(cd "$(dirname "$0")/.." 
&& pwd)" -source "$ROOT/scripts/zig-build-env.sh" SANITIZED_TAG="$(echo "$TAG" | tr '[:upper:]' '[:lower:]' | sed -E 's/[^a-z0-9]+/-/g; s/^-+//; s/-+$//; s/-+/-/g')" BUNDLE_ID="com.cmuxterm.app.debug.$(echo "$TAG" | tr '[:upper:]' '[:lower:]' | sed -E 's/[^a-z0-9]+/./g; s/^\\.+//; s/\\.+$//; s/\\.+/./g')" APP_PROCESS_NAME="cmux DEV ${TAG}" @@ -26,7 +25,8 @@ APP_SUPPORT_DIR="$HOME/Library/Application Support/cmux" APP_SOCKET="/tmp/cmux-debug-${SANITIZED_TAG}.sock" DAEMON_SOCKET="${APP_SUPPORT_DIR}/cmuxd-dev-${SANITIZED_TAG}.sock" DAEMON_LOG="/tmp/cmuxd-local-${SANITIZED_TAG}.log" -DAEMON_BIN="$ROOT/daemon/remote/zig/zig-out/bin/cmuxd-remote" +DAEMON_MANIFEST="$ROOT/daemon/remote/rust/Cargo.toml" +DAEMON_BIN="$ROOT/daemon/remote/rust/target/debug/cmuxd-remote" CLI_BIN="$HOME/Library/Developer/Xcode/DerivedData/cmux-${SANITIZED_TAG}/Build/Products/Debug/cmux" APP="$HOME/Library/Developer/Xcode/DerivedData/cmux-${SANITIZED_TAG}/Build/Products/Debug/cmux DEV ${TAG}.app" APP_LOG="/tmp/cmux-local-daemon-${SANITIZED_TAG}.log" @@ -40,9 +40,8 @@ cleanup() { trap cleanup EXIT -cd "$ROOT/daemon/remote/zig" -cmux_run_zig build -Doptimize=ReleaseFast -cd "$ROOT" +GHOSTTY_SOURCE_DIR="${GHOSTTY_SOURCE_DIR:-$ROOT/ghostty}" \ + cargo build --manifest-path "$DAEMON_MANIFEST" >/dev/null pkill -f "cmuxd-remote serve --unix --socket ${DAEMON_SOCKET}" >/dev/null 2>&1 || true rm -f "$DAEMON_SOCKET" "$DAEMON_LOG" @@ -143,6 +142,7 @@ if not workspace_ready: raise SystemExit(f"error: app never reached workspace-ready state: {last}") probe_deadline = time.time() + 10.0 +fresh_workspace_ready = False while time.time() < probe_deadline: probe = None try: @@ -150,6 +150,12 @@ while time.time() < probe_deadline: probe.connect() if not probe.ping(): raise RuntimeError("ping returned false") + try: + probe.activate_app() + except Exception: + pass + _ = probe.current_workspace() + fresh_workspace_ready = True print("ready") break except Exception as e: @@ -161,7 +167,7 @@ while time.time() < 
probe_deadline: probe.close() except Exception: pass -else: +if not fresh_workspace_ready: raise SystemExit(f"error: app ready-check reconnect/ping failed: {last}") if client is not None: diff --git a/scripts/verify-remote-session-cli.sh b/scripts/verify-remote-session-cli.sh index 9e2594c4d..d6ba249e0 100755 --- a/scripts/verify-remote-session-cli.sh +++ b/scripts/verify-remote-session-cli.sh @@ -2,8 +2,8 @@ set -euo pipefail ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)" -source "$ROOT/scripts/zig-build-env.sh" -DAEMON_DIR="$ROOT/daemon/remote/zig" +DAEMON_MANIFEST="$ROOT/daemon/remote/rust/Cargo.toml" +DAEMON_BIN="$ROOT/daemon/remote/rust/target/debug/cmuxd-remote" if ! command -v expect >/dev/null 2>&1; then echo "ERROR: expect is required" @@ -31,13 +31,11 @@ cleanup() { } trap cleanup EXIT -echo "=== Build Zig daemon ===" -( - cd "$DAEMON_DIR" - cmux_run_zig build -Doptimize=ReleaseFast >/dev/null -) +echo "=== Build Rust daemon ===" +GHOSTTY_SOURCE_DIR="${GHOSTTY_SOURCE_DIR:-$ROOT/ghostty}" \ + cargo build --manifest-path "$DAEMON_MANIFEST" >/dev/null -BIN="$DAEMON_DIR/zig-out/bin/cmuxd-remote" +BIN="$DAEMON_BIN" if [[ ! -x "$BIN" ]]; then echo "ERROR: daemon binary missing at $BIN" exit 1 diff --git a/tests/test_cmux_pty_cli_bridge.sh b/tests/test_cmux_pty_cli_bridge.sh new file mode 100755 index 000000000..715fa86ae --- /dev/null +++ b/tests/test_cmux_pty_cli_bridge.sh @@ -0,0 +1,184 @@ +#!/usr/bin/env bash +set -euo pipefail + +ROOT="$(cd "$(dirname "$0")/.." 
&& pwd)" +TMP_DIR="$(mktemp -d /tmp/cmux-pty-cli.XXXXXX)" +DAEMON_SOCKET="$TMP_DIR/daemon.sock" +APP_SOCKET="$TMP_DIR/app.sock" +DAEMON_LOG="$TMP_DIR/daemon.log" +FAKE_APP_LOG="$TMP_DIR/fake-app.log" + +cleanup() { + if [[ -n "${FAKE_APP_PID:-}" ]]; then + kill "$FAKE_APP_PID" >/dev/null 2>&1 || true + fi + if [[ -n "${DAEMON_PID:-}" ]]; then + kill "$DAEMON_PID" >/dev/null 2>&1 || true + fi + rm -rf "$TMP_DIR" +} +trap cleanup EXIT + +CLI_BIN="${CMUX_CLI_BIN:-}" +if [[ -z "$CLI_BIN" ]]; then + CLI_BIN="$( + find "$HOME/Library/Developer/Xcode/DerivedData" -path "*/Build/Products/Debug/cmux" -exec stat -f '%m %N' {} \; \ + | sort -nr \ + | head -1 \ + | cut -d' ' -f2- + )" +fi + +if [[ -z "$CLI_BIN" || ! -x "$CLI_BIN" ]]; then + echo "cmux CLI binary not found; set CMUX_CLI_BIN" >&2 + exit 1 +fi + +GHOSTTY_SOURCE_DIR="$ROOT/ghostty" cargo build --manifest-path "$ROOT/daemon/remote/rust/Cargo.toml" >/dev/null +DAEMON_BIN="$ROOT/daemon/remote/rust/target/debug/cmuxd-remote" + +"$DAEMON_BIN" serve --unix --socket "$DAEMON_SOCKET" >"$DAEMON_LOG" 2>&1 & +DAEMON_PID=$! 
+ +python3 - <<'PY' "$DAEMON_SOCKET" +import socket, sys, time +path = sys.argv[1] +deadline = time.time() + 10 +while time.time() < deadline: + try: + sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) + sock.connect(path) + sock.close() + raise SystemExit(0) + except OSError: + time.sleep(0.05) +raise SystemExit("daemon socket did not become ready") +PY + +"$DAEMON_BIN" session new pty-cli --socket "$DAEMON_SOCKET" --quiet --detached -- /bin/sh "$ROOT/daemon/remote/compat/testdata/ready_cat.sh" >/dev/null + +python3 - <<'PY' "$APP_SOCKET" "$DAEMON_SOCKET" >"$FAKE_APP_LOG" 2>&1 & +import json, os, socket, sys +app_socket, daemon_socket = sys.argv[1], sys.argv[2] +try: + os.unlink(app_socket) +except FileNotFoundError: + pass +server = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) +server.bind(app_socket) +server.listen(4) +while True: + conn, _ = server.accept() + with conn: + file = conn.makefile("rwb") + while True: + line = file.readline() + if not line: + break + req = json.loads(line.decode("utf-8")) + method = req.get("method") + if method == "surface.daemon_info": + resp = { + "id": req.get("id"), + "ok": True, + "result": { + "socket_path": daemon_socket, + "session_id": "pty-cli", + "workspace_id": "workspace:1", + "surface_id": "surface:1", + }, + } + else: + resp = { + "id": req.get("id"), + "ok": False, + "error": {"code": "method_not_found", "message": method or ""}, + } + file.write((json.dumps(resp) + "\n").encode("utf-8")) + file.flush() +PY +FAKE_APP_PID=$! 
+ +python3 - <<'PY' "$CLI_BIN" "$APP_SOCKET" "$DAEMON_BIN" "$DAEMON_SOCKET" +import fcntl +import os +import pty +import re +import select +import struct +import subprocess +import sys +import termios +import time + +cli_bin, app_socket, daemon_bin, daemon_socket = sys.argv[1:5] +env = os.environ.copy() +env["CMUX_SOCKET_PATH"] = app_socket + +def daemon_history(): + return subprocess.run( + [daemon_bin, "session", "history", "pty-cli", "--socket", daemon_socket], + text=True, + capture_output=True, + check=True, + ).stdout + +def daemon_status(): + return subprocess.run( + [daemon_bin, "session", "status", "pty-cli", "--socket", daemon_socket], + text=True, + capture_output=True, + check=True, + ).stdout.strip() + +pid, fd = pty.fork() +if pid == 0: + os.execve( + cli_bin, + [cli_bin, "pty", "--workspace", "workspace:1", "--surface", "surface:1"], + env, + ) + +capture = bytearray() + +def pump(timeout=0.2): + r, _, _ = select.select([fd], [], [], timeout) + if not r: + return b"" + chunk = os.read(fd, 65536) + capture.extend(chunk) + return chunk + +deadline = time.time() + 10 +while time.time() < deadline: + pump() + if b"READY" in capture: + break +else: + raise SystemExit(f"cmux pty never showed READY: {capture.decode('utf-8', 'replace')}") + +os.write(fd, b"bridge-ok\n") +deadline = time.time() + 5 +while time.time() < deadline: + if "bridge-ok" in daemon_history(): + break + time.sleep(0.05) +else: + raise SystemExit("cmux pty write never reached daemon history") + +fcntl.ioctl(fd, termios.TIOCSWINSZ, struct.pack("HHHH", 31, 91, 0, 0)) +deadline = time.time() + 5 +while time.time() < deadline: + if daemon_status().endswith("91x31"): + break + time.sleep(0.05) +else: + raise SystemExit(f"cmux pty resize never reached daemon status: {daemon_status()}") + +subprocess.run([daemon_bin, "session", "kill", "pty-cli", "--socket", daemon_socket], check=True, capture_output=True) +_, status = os.waitpid(pid, 0) +if status != 0: + raise SystemExit(f"cmux pty exited 
with status {status}") + +print({"history_contains_bridge_ok": True, "status": "91x31", "exit_status": status}) +PY diff --git a/tests/test_cmux_pty_tmux_parity.sh b/tests/test_cmux_pty_tmux_parity.sh new file mode 100755 index 000000000..123db2de7 --- /dev/null +++ b/tests/test_cmux_pty_tmux_parity.sh @@ -0,0 +1,329 @@ +#!/usr/bin/env bash +set -euo pipefail + +ROOT="$(cd "$(dirname "$0")/.." && pwd)" +TMP_DIR="$(mktemp -d /tmp/cmux-pty-tmux-parity.XXXXXX)" +DAEMON_SOCKET="$TMP_DIR/daemon.sock" +APP_SOCKET="$TMP_DIR/app.sock" +DAEMON_LOG="$TMP_DIR/daemon.log" +FAKE_APP_LOG="$TMP_DIR/fake-app.log" +TMUX_SOCKET="cmux-pty-parity-$$" +TMUX_TMPDIR="$TMP_DIR/tmux" +READY_CAT="$ROOT/daemon/remote/compat/testdata/ready_cat.sh" + +cleanup() { + if [[ -n "${FAKE_APP_PID:-}" ]]; then + kill "$FAKE_APP_PID" >/dev/null 2>&1 || true + fi + if [[ -n "${DAEMON_PID:-}" ]]; then + kill "$DAEMON_PID" >/dev/null 2>&1 || true + fi + if command -v tmux >/dev/null 2>&1; then + TMUX_TMPDIR="$TMUX_TMPDIR" tmux -f /dev/null -L "$TMUX_SOCKET" kill-server >/dev/null 2>&1 || true + fi + rm -rf "$TMP_DIR" +} +trap cleanup EXIT + +if ! command -v tmux >/dev/null 2>&1; then + echo "tmux not found in PATH" >&2 + exit 1 +fi + +CLI_BIN="${CMUX_CLI_BIN:-}" +if [[ -z "$CLI_BIN" ]]; then + CLI_BIN="$( + find "$HOME/Library/Developer/Xcode/DerivedData" -path "*/Build/Products/Debug/cmux" -exec stat -f '%m %N' {} \; \ + | sort -nr \ + | head -1 \ + | cut -d' ' -f2- + )" +fi + +if [[ -z "$CLI_BIN" || ! -x "$CLI_BIN" ]]; then + echo "cmux CLI binary not found; set CMUX_CLI_BIN" >&2 + exit 1 +fi + +mkdir -p "$TMUX_TMPDIR" + +DAEMON_BIN="${CMUX_DAEMON_BIN:-$ROOT/daemon/remote/rust/target/debug/cmuxd-remote}" +if [[ -z "${CMUX_DAEMON_BIN:-}" ]]; then + GHOSTTY_SOURCE_DIR="$ROOT/ghostty" cargo build --manifest-path "$ROOT/daemon/remote/rust/Cargo.toml" >/dev/null +fi +if [[ ! 
-x "$DAEMON_BIN" ]]; then + echo "cmuxd-remote binary not found; set CMUX_DAEMON_BIN or build daemon/remote/rust" >&2 + exit 1 +fi + +"$DAEMON_BIN" serve --unix --socket "$DAEMON_SOCKET" >"$DAEMON_LOG" 2>&1 & +DAEMON_PID=$! + +python3 - <<'PY' "$DAEMON_SOCKET" +import socket +import sys +import time + +path = sys.argv[1] +deadline = time.time() + 10 +while time.time() < deadline: + try: + sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) + sock.connect(path) + sock.close() + raise SystemExit(0) + except OSError: + time.sleep(0.05) +raise SystemExit("daemon socket did not become ready") +PY + +"$DAEMON_BIN" session new pty-cli --socket "$DAEMON_SOCKET" --quiet --detached -- /bin/sh "$READY_CAT" >/dev/null + +python3 - <<'PY' "$APP_SOCKET" "$DAEMON_SOCKET" >"$FAKE_APP_LOG" 2>&1 & +import json +import os +import socket +import sys + +app_socket, daemon_socket = sys.argv[1], sys.argv[2] +try: + os.unlink(app_socket) +except FileNotFoundError: + pass +server = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) +server.bind(app_socket) +server.listen(8) +while True: + conn, _ = server.accept() + with conn: + file = conn.makefile("rwb") + while True: + line = file.readline() + if not line: + break + req = json.loads(line.decode("utf-8")) + method = req.get("method") + if method == "surface.daemon_info": + resp = { + "id": req.get("id"), + "ok": True, + "result": { + "socket_path": daemon_socket, + "session_id": "pty-cli", + "workspace_id": "workspace:1", + "surface_id": "surface:1", + }, + } + else: + resp = { + "id": req.get("id"), + "ok": False, + "error": {"code": "method_not_found", "message": method or ""}, + } + file.write((json.dumps(resp) + "\n").encode("utf-8")) + file.flush() +PY +FAKE_APP_PID=$! 
+ +python3 - <<'PY' "$CLI_BIN" "$APP_SOCKET" "$DAEMON_BIN" "$DAEMON_SOCKET" "$TMUX_SOCKET" "$TMUX_TMPDIR" "$READY_CAT" +import fcntl +import os +import pty +import select +import shutil +import struct +import subprocess +import sys +import termios +import time + +cli_bin, app_socket, daemon_bin, daemon_socket, tmux_socket, tmux_tmpdir, ready_cat = sys.argv[1:8] + +cmux_env = os.environ.copy() +cmux_env["CMUX_SOCKET_PATH"] = app_socket + +tmux_env = os.environ.copy() +tmux_env["TMUX_TMPDIR"] = tmux_tmpdir +# Use a conservative TERM value so the tmux side works on minimal test hosts. +tmux_env["TERM"] = "vt100" + +tmux_bin = shutil.which("tmux") +if not tmux_bin: + raise SystemExit("tmux not found in PATH") +tmux_base = [tmux_bin, "-f", "/dev/null", "-L", tmux_socket] + + +def run_tmux(*args: str) -> subprocess.CompletedProcess[str]: + return subprocess.run(tmux_base + list(args), capture_output=True, text=True, env=tmux_env, check=True) + + +def daemon_history() -> str: + return subprocess.run( + [daemon_bin, "session", "history", "pty-cli", "--socket", daemon_socket], + text=True, + capture_output=True, + check=True, + ).stdout + + +def daemon_status() -> str: + return subprocess.run( + [daemon_bin, "session", "status", "pty-cli", "--socket", daemon_socket], + text=True, + capture_output=True, + check=True, + ).stdout.strip() + + +def tmux_pane_size() -> str: + return run_tmux("display-message", "-p", "-t", "pty-parity:0.0", "#{pane_width}x#{pane_height}").stdout.strip() + + +def start_attach(argv, env): + pid, fd = pty.fork() + if pid == 0: + os.execvpe(argv[0], argv, env) + return pid, fd + + +def pump(fd: int, capture: bytearray, timeout: float = 0.2) -> bytes: + r, _, _ = select.select([fd], [], [], timeout) + if not r: + return b"" + chunk = os.read(fd, 65536) + capture.extend(chunk) + return chunk + + +def wait_for_capture(fd: int, capture: bytearray, token: bytes, timeout: float, label: str): + deadline = time.time() + timeout + while time.time() < deadline: 
+ pump(fd, capture) + if token in capture: + return + raise SystemExit(f"{label} never showed {token!r}: {capture.decode('utf-8', 'replace')}") + + +def assert_contains(capture: bytearray, token: str, label: str): + text = capture.decode("utf-8", "replace") + if token not in text: + raise SystemExit(f"{label} missing {token!r}: {text!r}") + + +def wait_for(pred, timeout: float, label: str): + deadline = time.time() + timeout + while time.time() < deadline: + if pred(): + return + time.sleep(0.05) + raise SystemExit(f"Timed out waiting for {label}") + + +def ensure_alive(pid: int, label: str, capture: bytearray): + done, status = os.waitpid(pid, os.WNOHANG) + if done == 0: + return + text = capture.decode("utf-8", "replace") + if os.WIFSIGNALED(status): + raise SystemExit(f"{label} exited by signal {os.WTERMSIG(status)}: {text!r}") + code = os.WEXITSTATUS(status) if os.WIFEXITED(status) else status + raise SystemExit(f"{label} exited early with status {code}: {text!r}") + + +def write_one(fd: int, data: bytes, pid: int, label: str, capture: bytearray): + try: + os.write(fd, data) + except OSError as exc: + ensure_alive(pid, label, capture) + raise SystemExit(f"{label} write failed: {exc}") + + +def write_both(data: bytes): + write_one(cmux_fd, data, cmux_pid, "cmux pty", cmux_capture) + write_one(tmux_fd, data, tmux_pid, "tmux attach", tmux_capture) + + +run_tmux("new-session", "-d", "-s", "pty-parity", "/bin/sh", ready_cat) +run_tmux("set-option", "-t", "pty-parity", "status", "off") +wait_for( + lambda: "READY" in run_tmux("capture-pane", "-p", "-t", "pty-parity:0.0", "-S", "-20").stdout, + 5.0, + "tmux READY in pane history", +) + +cmux_pid, cmux_fd = start_attach([cli_bin, "pty", "--workspace", "workspace:1", "--surface", "surface:1"], cmux_env) +tmux_pid, tmux_fd = start_attach(tmux_base + ["attach", "-t", "pty-parity"], tmux_env) + +cmux_capture = bytearray() +tmux_capture = bytearray() + +deadline = time.time() + 10.0 +while time.time() < deadline: + 
pump(cmux_fd, cmux_capture, 0.05) + pump(tmux_fd, tmux_capture, 0.05) + ensure_alive(cmux_pid, "cmux pty", cmux_capture) + ensure_alive(tmux_pid, "tmux attach", tmux_capture) + if b"READY" in cmux_capture: + break +else: + raise SystemExit(f"cmux pty never showed b'READY': {cmux_capture.decode('utf-8', 'replace')}") + +write_both(b"parity-hello\n") +wait_for_capture(cmux_fd, cmux_capture, b"parity-hello", 5.0, "cmux pty") +wait_for_capture(tmux_fd, tmux_capture, b"parity-hello", 5.0, "tmux attach") + +# Fragmented OSC 11 query bytes. This reproduces the short-buffer path +# that previously caused a crash/infinite loop in cmux's terminal surface path. +for frag in (b"\x1b", b"]", b"1", b"1", b";", b"?", b"\x07"): + write_both(frag) + time.sleep(0.02) +write_both(b"frag-ok\n") + +wait_for_capture(cmux_fd, cmux_capture, b"frag-ok", 5.0, "cmux pty after fragmented osc") +wait_for_capture(tmux_fd, tmux_capture, b"frag-ok", 5.0, "tmux attach after fragmented osc") + +wait_for(lambda: "parity-hello" in daemon_history() and "frag-ok" in daemon_history(), 5.0, "daemon history tokens") + +fcntl.ioctl(cmux_fd, termios.TIOCSWINSZ, struct.pack("HHHH", 31, 91, 0, 0)) +fcntl.ioctl(tmux_fd, termios.TIOCSWINSZ, struct.pack("HHHH", 31, 91, 0, 0)) + +wait_for(lambda: daemon_status().endswith("91x31"), 5.0, "cmux daemon resize") +wait_for(lambda: tmux_pane_size() == "91x31", 5.0, "tmux pane resize") + +for token in ("parity-hello", "frag-ok"): + assert_contains(cmux_capture, token, "cmux transcript") + assert_contains(tmux_capture, token, "tmux transcript") + +subprocess.run([daemon_bin, "session", "kill", "pty-cli", "--socket", daemon_socket], check=True, capture_output=True) +run_tmux("kill-session", "-t", "pty-parity") + +def wait_for_exit(pid: int, label: str): + deadline = time.time() + 5 + while time.time() < deadline: + done, status = os.waitpid(pid, os.WNOHANG) + if done == 0: + time.sleep(0.05) + continue + if os.WIFSIGNALED(status): + raise SystemExit(f"{label} terminated by 
signal {os.WTERMSIG(status)}") + return os.WEXITSTATUS(status) if os.WIFEXITED(status) else 0 + raise SystemExit(f"{label} did not exit") + +cmux_exit = wait_for_exit(cmux_pid, "cmux pty") +tmux_exit = wait_for_exit(tmux_pid, "tmux attach") + +if cmux_exit != 0: + raise SystemExit(f"cmux pty exited with status {cmux_exit}") +if tmux_exit not in (0, 1): + raise SystemExit(f"tmux attach exited with unexpected status {tmux_exit}") + +print( + { + "cmux_exit": cmux_exit, + "tmux_exit": tmux_exit, + "daemon_status": "91x31", + "tmux_status": "91x31", + "tokens": ["parity-hello", "frag-ok"], + } +) +PY